/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/highbd_idct_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"

// Only for the first pass of the _34_ variant. Since it only uses values from
// the top-left 8x8, it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 6 columns make
// the cut. None of the elements in the 7th or 8th column are used, so any
// reference to in[6] or in[7] is skipped too.
// The C version computes a single row of 32 outputs per call. Here the
// top-left 8x8 is transposed so that eight rows can be processed at once with
// SIMD.

// vp9/common/vp9_scan.c:vp9_default_iscan_32x32 arranges the first 34 non-zero
// coefficients as follows:
//    0  1  2  3  4  5  6  7
// 0  0  2  5 10 17 25
// 1  1  4  8 15 22 30
// 2  3  7 12 18 28
// 3  6 11 16 23 31
// 4  9 14 19 29
// 5 13 20 26
// 6 21 27 33
// 7 24 32
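//
// A note on the helpers used below (see idct_neon.h and highbd_idct_neon.h):
// each *_dual helper operates on both halves of an int32x4x2_t (eight lanes),
// and conceptually
//   multiply_shift_and_narrow_s32_dual(a, c)
//       = dct_const_round_shift(a * c)
//   multiply_accumulate_shift_and_narrow_s32_dual(a, ac, b, bc)
//       = dct_const_round_shift(a * ac + b * bc)
//   sub_multiply_shift_and_narrow_s32_dual(a, b, c)
//       = dct_const_round_shift((a - b) * c)
//   add_multiply_shift_and_narrow_s32_dual(a, b, c)
//       = dct_const_round_shift((a + b) * c)
// while highbd_idct_add_dual() and highbd_idct_sub_dual() are plain
// element-wise adds and subtracts.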
static void vpx_highbd_idct32_6_neon(const tran_low_t *input, int32_t *output) {
  int32x4x2_t in[8], s1[32], s2[32], s3[32];

  in[0].val[0] = vld1q_s32(input);
  in[0].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[1].val[0] = vld1q_s32(input);
  in[1].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[2].val[0] = vld1q_s32(input);
  in[2].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[3].val[0] = vld1q_s32(input);
  in[3].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[4].val[0] = vld1q_s32(input);
  in[4].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[5].val[0] = vld1q_s32(input);
  in[5].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[6].val[0] = vld1q_s32(input);
  in[6].val[1] = vld1q_s32(input + 4);
  input += 32;
  in[7].val[0] = vld1q_s32(input);
  in[7].val[1] = vld1q_s32(input + 4);
  transpose_s32_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);
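  // in[6] and in[7] take part in the transpose (transpose_s32_8x8() works on
  // a full 8x8), but they now hold columns 6 and 7, which are all zero in the
  // _34_ case, so nothing below reads them.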

  // stage 1
  // input[1] * cospi_31_64 - input[31] * cospi_1_64 (but input[31] == 0)
  s1[16] = multiply_shift_and_narrow_s32_dual(in[1], cospi_31_64);
  // input[1] * cospi_1_64 + input[31] * cospi_31_64 (but input[31] == 0)
  s1[31] = multiply_shift_and_narrow_s32_dual(in[1], cospi_1_64);

  s1[20] = multiply_shift_and_narrow_s32_dual(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s32_dual(in[5], cospi_5_64);

  s1[23] = multiply_shift_and_narrow_s32_dual(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s32_dual(in[3], cospi_3_64);
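  // The negated constant above falls out of the elided zero term: in the full
  // transform s1[23] = input[29] * cospi_3_64 - input[3] * cospi_29_64, and
  // with input[29] == 0 only the negative term survives.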

  // stage 2
  s2[8] = multiply_shift_and_narrow_s32_dual(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s32_dual(in[2], cospi_2_64);

  // stage 3
  s1[4] = multiply_shift_and_narrow_s32_dual(in[4], cospi_28_64);
  s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);

  s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
                                                         s1[31], cospi_28_64);
  s1[30] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], cospi_28_64,
                                                         s1[31], cospi_4_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], -cospi_20_64,
                                                         s1[27], cospi_12_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], cospi_12_64,
                                                         s1[27], cospi_20_64);

  s1[22] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_12_64,
                                                         s1[24], -cospi_20_64);
  s1[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,
                                                         s1[24], cospi_12_64);

  // stage 4
  s1[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);
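  // With input[16] == 0, the pair (input[0] +/- input[16]) * cospi_16_64
  // collapses to a single value, so s1[0] stands in for both butterfly
  // outputs.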

  s2[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,
                                                        s2[15], cospi_24_64);
  s2[14] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], cospi_24_64,
                                                         s2[15], cospi_8_64);

  s2[20] = highbd_idct_sub_dual(s1[23], s1[20]);
  s2[21] = highbd_idct_sub_dual(s1[22], s1[21]);
  s2[22] = highbd_idct_add_dual(s1[21], s1[22]);
  s2[23] = highbd_idct_add_dual(s1[20], s1[23]);
  s2[24] = highbd_idct_add_dual(s1[24], s1[27]);
  s2[25] = highbd_idct_add_dual(s1[25], s1[26]);
  s2[26] = highbd_idct_sub_dual(s1[25], s1[26]);
  s2[27] = highbd_idct_sub_dual(s1[24], s1[27]);

  // stage 5
  s1[5] = sub_multiply_shift_and_narrow_s32_dual(s1[7], s1[4], cospi_16_64);
  s1[6] = add_multiply_shift_and_narrow_s32_dual(s1[4], s1[7], cospi_16_64);

  s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], -cospi_8_64,
                                                         s1[30], cospi_24_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], cospi_24_64,
                                                         s1[30], cospi_8_64);

  s1[19] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_8_64,
                                                         s1[31], cospi_24_64);
  s1[28] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], cospi_24_64,
                                                         s1[31], cospi_8_64);

  s1[20] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_24_64,
                                                         s2[27], -cospi_8_64);
  s1[27] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_8_64,
                                                         s2[27], cospi_24_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_24_64,
                                                         s2[26], -cospi_8_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_8_64,
                                                         s2[26], cospi_24_64);

  // stage 6
  s2[0] = highbd_idct_add_dual(s1[0], s1[7]);
  s2[1] = highbd_idct_add_dual(s1[0], s1[6]);
  s2[2] = highbd_idct_add_dual(s1[0], s1[5]);
  s2[3] = highbd_idct_add_dual(s1[0], s1[4]);
  s2[4] = highbd_idct_sub_dual(s1[0], s1[4]);
  s2[5] = highbd_idct_sub_dual(s1[0], s1[5]);
  s2[6] = highbd_idct_sub_dual(s1[0], s1[6]);
  s2[7] = highbd_idct_sub_dual(s1[0], s1[7]);

  s2[10] = sub_multiply_shift_and_narrow_s32_dual(s2[14], s2[9], cospi_16_64);
  s2[13] = add_multiply_shift_and_narrow_s32_dual(s2[9], s2[14], cospi_16_64);

  s2[11] = sub_multiply_shift_and_narrow_s32_dual(s2[15], s2[8], cospi_16_64);
  s2[12] = add_multiply_shift_and_narrow_s32_dual(s2[8], s2[15], cospi_16_64);

  s2[16] = highbd_idct_add_dual(s1[16], s2[23]);
  s2[17] = highbd_idct_add_dual(s1[17], s2[22]);
  s2[18] = highbd_idct_add_dual(s1[18], s1[21]);
  s2[19] = highbd_idct_add_dual(s1[19], s1[20]);
  s2[20] = highbd_idct_sub_dual(s1[19], s1[20]);
  s2[21] = highbd_idct_sub_dual(s1[18], s1[21]);
  s2[22] = highbd_idct_sub_dual(s1[17], s2[22]);
  s2[23] = highbd_idct_sub_dual(s1[16], s2[23]);

  s3[24] = highbd_idct_sub_dual(s1[31], s2[24]);
  s3[25] = highbd_idct_sub_dual(s1[30], s2[25]);
  s3[26] = highbd_idct_sub_dual(s1[29], s1[26]);
  s3[27] = highbd_idct_sub_dual(s1[28], s1[27]);
  s2[28] = highbd_idct_add_dual(s1[27], s1[28]);
  s2[29] = highbd_idct_add_dual(s1[26], s1[29]);
  s2[30] = highbd_idct_add_dual(s2[25], s1[30]);
  s2[31] = highbd_idct_add_dual(s2[24], s1[31]);

  // stage 7
  s1[0] = highbd_idct_add_dual(s2[0], s2[15]);
  s1[1] = highbd_idct_add_dual(s2[1], s2[14]);
  s1[2] = highbd_idct_add_dual(s2[2], s2[13]);
  s1[3] = highbd_idct_add_dual(s2[3], s2[12]);
  s1[4] = highbd_idct_add_dual(s2[4], s2[11]);
  s1[5] = highbd_idct_add_dual(s2[5], s2[10]);
  s1[6] = highbd_idct_add_dual(s2[6], s2[9]);
  s1[7] = highbd_idct_add_dual(s2[7], s2[8]);
  s1[8] = highbd_idct_sub_dual(s2[7], s2[8]);
  s1[9] = highbd_idct_sub_dual(s2[6], s2[9]);
  s1[10] = highbd_idct_sub_dual(s2[5], s2[10]);
  s1[11] = highbd_idct_sub_dual(s2[4], s2[11]);
  s1[12] = highbd_idct_sub_dual(s2[3], s2[12]);
  s1[13] = highbd_idct_sub_dual(s2[2], s2[13]);
  s1[14] = highbd_idct_sub_dual(s2[1], s2[14]);
  s1[15] = highbd_idct_sub_dual(s2[0], s2[15]);

  s1[20] = sub_multiply_shift_and_narrow_s32_dual(s3[27], s2[20], cospi_16_64);
  s1[27] = add_multiply_shift_and_narrow_s32_dual(s2[20], s3[27], cospi_16_64);

  s1[21] = sub_multiply_shift_and_narrow_s32_dual(s3[26], s2[21], cospi_16_64);
  s1[26] = add_multiply_shift_and_narrow_s32_dual(s2[21], s3[26], cospi_16_64);

  s1[22] = sub_multiply_shift_and_narrow_s32_dual(s3[25], s2[22], cospi_16_64);
  s1[25] = add_multiply_shift_and_narrow_s32_dual(s2[22], s3[25], cospi_16_64);

  s1[23] = sub_multiply_shift_and_narrow_s32_dual(s3[24], s2[23], cospi_16_64);
  s1[24] = add_multiply_shift_and_narrow_s32_dual(s2[23], s3[24], cospi_16_64);

  // final stage
  s3[0] = highbd_idct_add_dual(s1[0], s2[31]);
  s3[1] = highbd_idct_add_dual(s1[1], s2[30]);
  s3[2] = highbd_idct_add_dual(s1[2], s2[29]);
  s3[3] = highbd_idct_add_dual(s1[3], s2[28]);
  s3[4] = highbd_idct_add_dual(s1[4], s1[27]);
  s3[5] = highbd_idct_add_dual(s1[5], s1[26]);
  s3[6] = highbd_idct_add_dual(s1[6], s1[25]);
  s3[7] = highbd_idct_add_dual(s1[7], s1[24]);
  s3[8] = highbd_idct_add_dual(s1[8], s1[23]);
  s3[9] = highbd_idct_add_dual(s1[9], s1[22]);
  s3[10] = highbd_idct_add_dual(s1[10], s1[21]);
  s3[11] = highbd_idct_add_dual(s1[11], s1[20]);
  s3[12] = highbd_idct_add_dual(s1[12], s2[19]);
  s3[13] = highbd_idct_add_dual(s1[13], s2[18]);
  s3[14] = highbd_idct_add_dual(s1[14], s2[17]);
  s3[15] = highbd_idct_add_dual(s1[15], s2[16]);
  s3[16] = highbd_idct_sub_dual(s1[15], s2[16]);
  s3[17] = highbd_idct_sub_dual(s1[14], s2[17]);
  s3[18] = highbd_idct_sub_dual(s1[13], s2[18]);
  s3[19] = highbd_idct_sub_dual(s1[12], s2[19]);
  s3[20] = highbd_idct_sub_dual(s1[11], s1[20]);
  s3[21] = highbd_idct_sub_dual(s1[10], s1[21]);
  s3[22] = highbd_idct_sub_dual(s1[9], s1[22]);
  s3[23] = highbd_idct_sub_dual(s1[8], s1[23]);
  s3[24] = highbd_idct_sub_dual(s1[7], s1[24]);
  s3[25] = highbd_idct_sub_dual(s1[6], s1[25]);
  s3[26] = highbd_idct_sub_dual(s1[5], s1[26]);
  s3[27] = highbd_idct_sub_dual(s1[4], s1[27]);
  s3[28] = highbd_idct_sub_dual(s1[3], s2[28]);
  s3[29] = highbd_idct_sub_dual(s1[2], s2[29]);
  s3[30] = highbd_idct_sub_dual(s1[1], s2[30]);
  s3[31] = highbd_idct_sub_dual(s1[0], s2[31]);

  vst1q_s32(output, s3[0].val[0]);
  output += 4;
  vst1q_s32(output, s3[0].val[1]);
  output += 4;
  vst1q_s32(output, s3[1].val[0]);
  output += 4;
  vst1q_s32(output, s3[1].val[1]);
  output += 4;
  vst1q_s32(output, s3[2].val[0]);
  output += 4;
  vst1q_s32(output, s3[2].val[1]);
  output += 4;
  vst1q_s32(output, s3[3].val[0]);
  output += 4;
  vst1q_s32(output, s3[3].val[1]);
  output += 4;
  vst1q_s32(output, s3[4].val[0]);
  output += 4;
  vst1q_s32(output, s3[4].val[1]);
  output += 4;
  vst1q_s32(output, s3[5].val[0]);
  output += 4;
  vst1q_s32(output, s3[5].val[1]);
  output += 4;
  vst1q_s32(output, s3[6].val[0]);
  output += 4;
  vst1q_s32(output, s3[6].val[1]);
  output += 4;
  vst1q_s32(output, s3[7].val[0]);
  output += 4;
  vst1q_s32(output, s3[7].val[1]);
  output += 4;

  vst1q_s32(output, s3[8].val[0]);
  output += 4;
  vst1q_s32(output, s3[8].val[1]);
  output += 4;
  vst1q_s32(output, s3[9].val[0]);
  output += 4;
  vst1q_s32(output, s3[9].val[1]);
  output += 4;
  vst1q_s32(output, s3[10].val[0]);
  output += 4;
  vst1q_s32(output, s3[10].val[1]);
  output += 4;
  vst1q_s32(output, s3[11].val[0]);
  output += 4;
  vst1q_s32(output, s3[11].val[1]);
  output += 4;
  vst1q_s32(output, s3[12].val[0]);
  output += 4;
  vst1q_s32(output, s3[12].val[1]);
  output += 4;
  vst1q_s32(output, s3[13].val[0]);
  output += 4;
  vst1q_s32(output, s3[13].val[1]);
  output += 4;
  vst1q_s32(output, s3[14].val[0]);
  output += 4;
  vst1q_s32(output, s3[14].val[1]);
  output += 4;
  vst1q_s32(output, s3[15].val[0]);
  output += 4;
  vst1q_s32(output, s3[15].val[1]);
  output += 4;

  vst1q_s32(output, s3[16].val[0]);
  output += 4;
  vst1q_s32(output, s3[16].val[1]);
  output += 4;
  vst1q_s32(output, s3[17].val[0]);
  output += 4;
  vst1q_s32(output, s3[17].val[1]);
  output += 4;
  vst1q_s32(output, s3[18].val[0]);
  output += 4;
  vst1q_s32(output, s3[18].val[1]);
  output += 4;
  vst1q_s32(output, s3[19].val[0]);
  output += 4;
  vst1q_s32(output, s3[19].val[1]);
  output += 4;
  vst1q_s32(output, s3[20].val[0]);
  output += 4;
  vst1q_s32(output, s3[20].val[1]);
  output += 4;
  vst1q_s32(output, s3[21].val[0]);
  output += 4;
  vst1q_s32(output, s3[21].val[1]);
  output += 4;
  vst1q_s32(output, s3[22].val[0]);
  output += 4;
  vst1q_s32(output, s3[22].val[1]);
  output += 4;
  vst1q_s32(output, s3[23].val[0]);
  output += 4;
  vst1q_s32(output, s3[23].val[1]);
  output += 4;

  vst1q_s32(output, s3[24].val[0]);
  output += 4;
  vst1q_s32(output, s3[24].val[1]);
  output += 4;
  vst1q_s32(output, s3[25].val[0]);
  output += 4;
  vst1q_s32(output, s3[25].val[1]);
  output += 4;
  vst1q_s32(output, s3[26].val[0]);
  output += 4;
  vst1q_s32(output, s3[26].val[1]);
  output += 4;
  vst1q_s32(output, s3[27].val[0]);
  output += 4;
  vst1q_s32(output, s3[27].val[1]);
  output += 4;
  vst1q_s32(output, s3[28].val[0]);
  output += 4;
  vst1q_s32(output, s3[28].val[1]);
  output += 4;
  vst1q_s32(output, s3[29].val[0]);
  output += 4;
  vst1q_s32(output, s3[29].val[1]);
  output += 4;
  vst1q_s32(output, s3[30].val[0]);
  output += 4;
  vst1q_s32(output, s3[30].val[1]);
  output += 4;
  vst1q_s32(output, s3[31].val[0]);
  output += 4;
  vst1q_s32(output, s3[31].val[1]);
}

static void vpx_highbd_idct32_8_neon(const int32_t *input, uint16_t *output,
                                     int stride, const int bd) {
  int32x4x2_t in[8], s1[32], s2[32], s3[32], out[32];

  load_and_transpose_s32_8x8(input, 8, &in[0], &in[1], &in[2], &in[3], &in[4],
                             &in[5], &in[6], &in[7]);
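  // Each call reads one 8x8 block of the 32x8 intermediate written by the
  // first pass (stored with a stride of 8, hence the second argument) and
  // transposes it back into column order.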

  // stage 1
  s1[16] = multiply_shift_and_narrow_s32_dual(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s32_dual(in[1], cospi_1_64);

  // Different for _8_
  s1[19] = multiply_shift_and_narrow_s32_dual(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s32_dual(in[7], cospi_7_64);
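  // In the second pass in[6] and in[7] can be nonzero (only inputs 8..31 of
  // the 32-point transform are still guaranteed zero), so the terms dropped
  // by the _6_ first pass reappear here and in the in[6] blocks below.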

  s1[20] = multiply_shift_and_narrow_s32_dual(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s32_dual(in[5], cospi_5_64);

  s1[23] = multiply_shift_and_narrow_s32_dual(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s32_dual(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s32_dual(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s32_dual(in[2], cospi_2_64);

  s2[11] = multiply_shift_and_narrow_s32_dual(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s32_dual(in[6], cospi_6_64);

  // stage 3
  s1[4] = multiply_shift_and_narrow_s32_dual(in[4], cospi_28_64);
  s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);

  s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
                                                         s1[31], cospi_28_64);
  s1[30] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], cospi_28_64,
                                                         s1[31], cospi_4_64);

  // Different for _8_
  s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s1[19], -cospi_28_64,
                                                         s1[28], -cospi_4_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s1[19], -cospi_4_64,
                                                         s1[28], cospi_28_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], -cospi_20_64,
                                                         s1[27], cospi_12_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], cospi_12_64,
                                                         s1[27], cospi_20_64);

  s1[22] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_12_64,
                                                         s1[24], -cospi_20_64);
  s1[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,
                                                         s1[24], cospi_12_64);

  // stage 4
  s1[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);

  s2[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,
                                                        s2[15], cospi_24_64);
  s2[14] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], cospi_24_64,
                                                         s2[15], cospi_8_64);

  s2[10] = multiply_accumulate_shift_and_narrow_s32_dual(s2[11], -cospi_24_64,
                                                         s2[12], -cospi_8_64);
  s2[13] = multiply_accumulate_shift_and_narrow_s32_dual(s2[11], -cospi_8_64,
                                                         s2[12], cospi_24_64);

  s2[16] = highbd_idct_add_dual(s1[16], s1[19]);

  s2[17] = highbd_idct_add_dual(s1[17], s1[18]);
  s2[18] = highbd_idct_sub_dual(s1[17], s1[18]);

  s2[19] = highbd_idct_sub_dual(s1[16], s1[19]);

  s2[20] = highbd_idct_sub_dual(s1[23], s1[20]);
  s2[21] = highbd_idct_sub_dual(s1[22], s1[21]);

  s2[22] = highbd_idct_add_dual(s1[21], s1[22]);
  s2[23] = highbd_idct_add_dual(s1[20], s1[23]);

  s2[24] = highbd_idct_add_dual(s1[24], s1[27]);
  s2[25] = highbd_idct_add_dual(s1[25], s1[26]);
  s2[26] = highbd_idct_sub_dual(s1[25], s1[26]);
  s2[27] = highbd_idct_sub_dual(s1[24], s1[27]);

  s2[28] = highbd_idct_sub_dual(s1[31], s1[28]);
  s2[29] = highbd_idct_sub_dual(s1[30], s1[29]);
  s2[30] = highbd_idct_add_dual(s1[29], s1[30]);
  s2[31] = highbd_idct_add_dual(s1[28], s1[31]);

  // stage 5
  s1[5] = sub_multiply_shift_and_narrow_s32_dual(s1[7], s1[4], cospi_16_64);
  s1[6] = add_multiply_shift_and_narrow_s32_dual(s1[4], s1[7], cospi_16_64);

  s1[8] = highbd_idct_add_dual(s2[8], s2[11]);
  s1[9] = highbd_idct_add_dual(s2[9], s2[10]);
  s1[10] = highbd_idct_sub_dual(s2[9], s2[10]);
  s1[11] = highbd_idct_sub_dual(s2[8], s2[11]);
  s1[12] = highbd_idct_sub_dual(s2[15], s2[12]);
  s1[13] = highbd_idct_sub_dual(s2[14], s2[13]);
  s1[14] = highbd_idct_add_dual(s2[13], s2[14]);
  s1[15] = highbd_idct_add_dual(s2[12], s2[15]);

  s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_8_64,
                                                         s2[29], cospi_24_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], cospi_24_64,
                                                         s2[29], cospi_8_64);

  s1[19] = multiply_accumulate_shift_and_narrow_s32_dual(s2[19], -cospi_8_64,
                                                         s2[28], cospi_24_64);
  s1[28] = multiply_accumulate_shift_and_narrow_s32_dual(s2[19], cospi_24_64,
                                                         s2[28], cospi_8_64);

  s1[20] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_24_64,
                                                         s2[27], -cospi_8_64);
  s1[27] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_8_64,
                                                         s2[27], cospi_24_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_24_64,
                                                         s2[26], -cospi_8_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_8_64,
                                                         s2[26], cospi_24_64);

  // stage 6
  s2[0] = highbd_idct_add_dual(s1[0], s1[7]);
  s2[1] = highbd_idct_add_dual(s1[0], s1[6]);
  s2[2] = highbd_idct_add_dual(s1[0], s1[5]);
  s2[3] = highbd_idct_add_dual(s1[0], s1[4]);
  s2[4] = highbd_idct_sub_dual(s1[0], s1[4]);
  s2[5] = highbd_idct_sub_dual(s1[0], s1[5]);
  s2[6] = highbd_idct_sub_dual(s1[0], s1[6]);
  s2[7] = highbd_idct_sub_dual(s1[0], s1[7]);

  s2[10] = sub_multiply_shift_and_narrow_s32_dual(s1[13], s1[10], cospi_16_64);
  s2[13] = add_multiply_shift_and_narrow_s32_dual(s1[10], s1[13], cospi_16_64);

  s2[11] = sub_multiply_shift_and_narrow_s32_dual(s1[12], s1[11], cospi_16_64);
  s2[12] = add_multiply_shift_and_narrow_s32_dual(s1[11], s1[12], cospi_16_64);

  s1[16] = highbd_idct_add_dual(s2[16], s2[23]);
  s1[17] = highbd_idct_add_dual(s2[17], s2[22]);
  s2[18] = highbd_idct_add_dual(s1[18], s1[21]);
  s2[19] = highbd_idct_add_dual(s1[19], s1[20]);
  s2[20] = highbd_idct_sub_dual(s1[19], s1[20]);
  s2[21] = highbd_idct_sub_dual(s1[18], s1[21]);
  s1[22] = highbd_idct_sub_dual(s2[17], s2[22]);
  s1[23] = highbd_idct_sub_dual(s2[16], s2[23]);

  s3[24] = highbd_idct_sub_dual(s2[31], s2[24]);
  s3[25] = highbd_idct_sub_dual(s2[30], s2[25]);
  s3[26] = highbd_idct_sub_dual(s1[29], s1[26]);
  s3[27] = highbd_idct_sub_dual(s1[28], s1[27]);
  s2[28] = highbd_idct_add_dual(s1[27], s1[28]);
  s2[29] = highbd_idct_add_dual(s1[26], s1[29]);
  s2[30] = highbd_idct_add_dual(s2[25], s2[30]);
  s2[31] = highbd_idct_add_dual(s2[24], s2[31]);

  // stage 7
  s1[0] = highbd_idct_add_dual(s2[0], s1[15]);
  s1[1] = highbd_idct_add_dual(s2[1], s1[14]);
  s1[2] = highbd_idct_add_dual(s2[2], s2[13]);
  s1[3] = highbd_idct_add_dual(s2[3], s2[12]);
  s1[4] = highbd_idct_add_dual(s2[4], s2[11]);
  s1[5] = highbd_idct_add_dual(s2[5], s2[10]);
  s1[6] = highbd_idct_add_dual(s2[6], s1[9]);
  s1[7] = highbd_idct_add_dual(s2[7], s1[8]);
  s1[8] = highbd_idct_sub_dual(s2[7], s1[8]);
  s1[9] = highbd_idct_sub_dual(s2[6], s1[9]);
  s1[10] = highbd_idct_sub_dual(s2[5], s2[10]);
  s1[11] = highbd_idct_sub_dual(s2[4], s2[11]);
  s1[12] = highbd_idct_sub_dual(s2[3], s2[12]);
  s1[13] = highbd_idct_sub_dual(s2[2], s2[13]);
  s1[14] = highbd_idct_sub_dual(s2[1], s1[14]);
  s1[15] = highbd_idct_sub_dual(s2[0], s1[15]);

  s1[20] = sub_multiply_shift_and_narrow_s32_dual(s3[27], s2[20], cospi_16_64);
  s1[27] = add_multiply_shift_and_narrow_s32_dual(s2[20], s3[27], cospi_16_64);

  s1[21] = sub_multiply_shift_and_narrow_s32_dual(s3[26], s2[21], cospi_16_64);
  s1[26] = add_multiply_shift_and_narrow_s32_dual(s2[21], s3[26], cospi_16_64);

  s2[22] = sub_multiply_shift_and_narrow_s32_dual(s3[25], s1[22], cospi_16_64);
  s1[25] = add_multiply_shift_and_narrow_s32_dual(s1[22], s3[25], cospi_16_64);

  s2[23] = sub_multiply_shift_and_narrow_s32_dual(s3[24], s1[23], cospi_16_64);
  s1[24] = add_multiply_shift_and_narrow_s32_dual(s1[23], s3[24], cospi_16_64);

  // final stage
  out[0] = highbd_idct_add_dual(s1[0], s2[31]);
  out[1] = highbd_idct_add_dual(s1[1], s2[30]);
  out[2] = highbd_idct_add_dual(s1[2], s2[29]);
  out[3] = highbd_idct_add_dual(s1[3], s2[28]);
  out[4] = highbd_idct_add_dual(s1[4], s1[27]);
  out[5] = highbd_idct_add_dual(s1[5], s1[26]);
  out[6] = highbd_idct_add_dual(s1[6], s1[25]);
  out[7] = highbd_idct_add_dual(s1[7], s1[24]);
  out[8] = highbd_idct_add_dual(s1[8], s2[23]);
  out[9] = highbd_idct_add_dual(s1[9], s2[22]);
  out[10] = highbd_idct_add_dual(s1[10], s1[21]);
  out[11] = highbd_idct_add_dual(s1[11], s1[20]);
  out[12] = highbd_idct_add_dual(s1[12], s2[19]);
  out[13] = highbd_idct_add_dual(s1[13], s2[18]);
  out[14] = highbd_idct_add_dual(s1[14], s1[17]);
  out[15] = highbd_idct_add_dual(s1[15], s1[16]);
  out[16] = highbd_idct_sub_dual(s1[15], s1[16]);
  out[17] = highbd_idct_sub_dual(s1[14], s1[17]);
  out[18] = highbd_idct_sub_dual(s1[13], s2[18]);
  out[19] = highbd_idct_sub_dual(s1[12], s2[19]);
  out[20] = highbd_idct_sub_dual(s1[11], s1[20]);
  out[21] = highbd_idct_sub_dual(s1[10], s1[21]);
  out[22] = highbd_idct_sub_dual(s1[9], s2[22]);
  out[23] = highbd_idct_sub_dual(s1[8], s2[23]);
  out[24] = highbd_idct_sub_dual(s1[7], s1[24]);
  out[25] = highbd_idct_sub_dual(s1[6], s1[25]);
  out[26] = highbd_idct_sub_dual(s1[5], s1[26]);
  out[27] = highbd_idct_sub_dual(s1[4], s1[27]);
  out[28] = highbd_idct_sub_dual(s1[3], s2[28]);
  out[29] = highbd_idct_sub_dual(s1[2], s2[29]);
  out[30] = highbd_idct_sub_dual(s1[1], s2[30]);
  out[31] = highbd_idct_sub_dual(s1[0], s2[31]);

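  // Add the strip to the destination: out[0..15] hold rows 0-15 and
  // out[16..31] hold rows 16-31 of the current eight columns.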
  highbd_idct16x16_add_store(out, output, stride, bd);
  highbd_idct16x16_add_store(out + 16, output + 16 * stride, stride, bd);
}

void vpx_highbd_idct32x32_34_add_neon(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
  int i;

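  // Two-pass strategy: the first pass transforms the eight rows that can hold
  // nonzero coefficients, storing a transposed 32x8 intermediate in temp; the
  // second pass then transforms eight columns per iteration and adds the
  // result into dest.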
  if (bd == 8) {
    int16_t temp[32 * 8];
    int16_t *t = temp;

    vpx_idct32_6_neon(input, t);

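    // The lowbd kernels are reused when bd == 8; the final argument (1) flags
    // that dest is a uint16_t high-bitdepth buffer.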
    for (i = 0; i < 32; i += 8) {
      vpx_idct32_8_neon(t, dest, stride, 1);
      t += (8 * 8);
      dest += 8;
    }
  } else {
    int32_t temp[32 * 8];
    int32_t *t = temp;

    vpx_highbd_idct32_6_neon(input, t);

    for (i = 0; i < 32; i += 8) {
      vpx_highbd_idct32_8_neon(t, dest, stride, bd);
      t += (8 * 8);
      dest += 8;
    }
  }
}