/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/ppc/transpose_vsx.h"
#include "vpx_dsp/ppc/txfm_common_vsx.h"
#include "vpx_dsp/ppc/types_vsx.h"

// Returns ((a +/- b) * cospi16 + (1 << 13)) >> 14.
static INLINE void single_butterfly(int16x8_t a, int16x8_t b, int16x8_t *add,
                                    int16x8_t *sub) {
  // Since a + b can overflow 16 bits, the multiplication is distributed
  // (a * c +/- b * c).
  const int32x4_t ac_e = vec_mule(a, cospi16_v);
  const int32x4_t ac_o = vec_mulo(a, cospi16_v);
  const int32x4_t bc_e = vec_mule(b, cospi16_v);
  const int32x4_t bc_o = vec_mulo(b, cospi16_v);

  // Reuse the same multiplies for sum and difference.
  const int32x4_t sum_e = vec_add(ac_e, bc_e);
  const int32x4_t sum_o = vec_add(ac_o, bc_o);
  const int32x4_t diff_e = vec_sub(ac_e, bc_e);
  const int32x4_t diff_o = vec_sub(ac_o, bc_o);

  // Add rounding offset
  const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);
  const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);
  const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding);
  const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding);

  const int32x4_t ssum_o = vec_sra(rsum_o, vec_dct_const_bits);
  const int32x4_t ssum_e = vec_sra(rsum_e, vec_dct_const_bits);
  const int32x4_t sdiff_o = vec_sra(rdiff_o, vec_dct_const_bits);
  const int32x4_t sdiff_e = vec_sra(rdiff_e, vec_dct_const_bits);

  // There's no pack operation for even and odd, so we need to permute.
  *add = (int16x8_t)vec_perm(ssum_e, ssum_o, vec_perm_odd_even_pack);
  *sub = (int16x8_t)vec_perm(sdiff_e, sdiff_o, vec_perm_odd_even_pack);
}
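
// Per-lane scalar sketch of single_butterfly() (illustrative only; it assumes
// the usual forward-DCT constants cospi_16_64 and DCT_CONST_BITS == 14 used
// by the scalar code):
//   add[i] = ((int32_t)a[i] * cospi_16_64 + (int32_t)b[i] * cospi_16_64 +
//             (1 << 13)) >> 14;
//   sub[i] = ((int32_t)a[i] * cospi_16_64 - (int32_t)b[i] * cospi_16_64 +
//             (1 << 13)) >> 14;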

// Returns (a * c1 + b * c2 + (1 << 13)) >> 14 in *add and
// (a * c2 - b * c1 + (1 << 13)) >> 14 in *sub.
static INLINE void double_butterfly(int16x8_t a, int16x8_t c1, int16x8_t b,
                                    int16x8_t c2, int16x8_t *add,
                                    int16x8_t *sub) {
  const int32x4_t ac1_o = vec_mulo(a, c1);
  const int32x4_t ac1_e = vec_mule(a, c1);
  const int32x4_t ac2_o = vec_mulo(a, c2);
  const int32x4_t ac2_e = vec_mule(a, c2);

  const int32x4_t bc1_o = vec_mulo(b, c1);
  const int32x4_t bc1_e = vec_mule(b, c1);
  const int32x4_t bc2_o = vec_mulo(b, c2);
  const int32x4_t bc2_e = vec_mule(b, c2);

  const int32x4_t sum_o = vec_add(ac1_o, bc2_o);
  const int32x4_t sum_e = vec_add(ac1_e, bc2_e);
  const int32x4_t diff_o = vec_sub(ac2_o, bc1_o);
  const int32x4_t diff_e = vec_sub(ac2_e, bc1_e);

  // Add rounding offset
  const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);
  const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);
  const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding);
  const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding);

  const int32x4_t ssum_o = vec_sra(rsum_o, vec_dct_const_bits);
  const int32x4_t ssum_e = vec_sra(rsum_e, vec_dct_const_bits);
  const int32x4_t sdiff_o = vec_sra(rdiff_o, vec_dct_const_bits);
  const int32x4_t sdiff_e = vec_sra(rdiff_e, vec_dct_const_bits);

  // There's no pack operation for even and odd, so we need to permute.
  *add = (int16x8_t)vec_perm(ssum_e, ssum_o, vec_perm_odd_even_pack);
  *sub = (int16x8_t)vec_perm(sdiff_e, sdiff_o, vec_perm_odd_even_pack);
}
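
// Note on the coefficient vectors passed to double_butterfly() below: the
// "m"-suffixed constants (cospi8m_v, cospi4m_v, cospi20m_v) are presumably the
// negated cosine values from txfm_common_vsx.h, so butterflies that need a
// negative coefficient can reuse the same helper unchanged.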

// While other architectures combine the load and the stage 1 operations,
// Power9 benchmarking shows no benefit in such an approach.
static INLINE void load(const int16_t *a, int stride, int16x8_t *b) {
  // Several combinations of load and shift instructions were tried; this is
  // the fastest one.
  {
    const int16x8_t l0 = vec_vsx_ld(0, a);
    const int16x8_t l1 = vec_vsx_ld(0, a + stride);
    const int16x8_t l2 = vec_vsx_ld(0, a + 2 * stride);
    const int16x8_t l3 = vec_vsx_ld(0, a + 3 * stride);
    const int16x8_t l4 = vec_vsx_ld(0, a + 4 * stride);
    const int16x8_t l5 = vec_vsx_ld(0, a + 5 * stride);
    const int16x8_t l6 = vec_vsx_ld(0, a + 6 * stride);
    const int16x8_t l7 = vec_vsx_ld(0, a + 7 * stride);

    const int16x8_t l8 = vec_vsx_ld(0, a + 8 * stride);
    const int16x8_t l9 = vec_vsx_ld(0, a + 9 * stride);
    const int16x8_t l10 = vec_vsx_ld(0, a + 10 * stride);
    const int16x8_t l11 = vec_vsx_ld(0, a + 11 * stride);
    const int16x8_t l12 = vec_vsx_ld(0, a + 12 * stride);
    const int16x8_t l13 = vec_vsx_ld(0, a + 13 * stride);
    const int16x8_t l14 = vec_vsx_ld(0, a + 14 * stride);
    const int16x8_t l15 = vec_vsx_ld(0, a + 15 * stride);

    b[0] = vec_sl(l0, vec_dct_scale_log2);
    b[1] = vec_sl(l1, vec_dct_scale_log2);
    b[2] = vec_sl(l2, vec_dct_scale_log2);
    b[3] = vec_sl(l3, vec_dct_scale_log2);
    b[4] = vec_sl(l4, vec_dct_scale_log2);
    b[5] = vec_sl(l5, vec_dct_scale_log2);
    b[6] = vec_sl(l6, vec_dct_scale_log2);
    b[7] = vec_sl(l7, vec_dct_scale_log2);

    b[8] = vec_sl(l8, vec_dct_scale_log2);
    b[9] = vec_sl(l9, vec_dct_scale_log2);
    b[10] = vec_sl(l10, vec_dct_scale_log2);
    b[11] = vec_sl(l11, vec_dct_scale_log2);
    b[12] = vec_sl(l12, vec_dct_scale_log2);
    b[13] = vec_sl(l13, vec_dct_scale_log2);
    b[14] = vec_sl(l14, vec_dct_scale_log2);
    b[15] = vec_sl(l15, vec_dct_scale_log2);
  }
  {
    const int16x8_t l16 = vec_vsx_ld(0, a + 16 * stride);
    const int16x8_t l17 = vec_vsx_ld(0, a + 17 * stride);
    const int16x8_t l18 = vec_vsx_ld(0, a + 18 * stride);
    const int16x8_t l19 = vec_vsx_ld(0, a + 19 * stride);
    const int16x8_t l20 = vec_vsx_ld(0, a + 20 * stride);
    const int16x8_t l21 = vec_vsx_ld(0, a + 21 * stride);
    const int16x8_t l22 = vec_vsx_ld(0, a + 22 * stride);
    const int16x8_t l23 = vec_vsx_ld(0, a + 23 * stride);

    const int16x8_t l24 = vec_vsx_ld(0, a + 24 * stride);
    const int16x8_t l25 = vec_vsx_ld(0, a + 25 * stride);
    const int16x8_t l26 = vec_vsx_ld(0, a + 26 * stride);
    const int16x8_t l27 = vec_vsx_ld(0, a + 27 * stride);
    const int16x8_t l28 = vec_vsx_ld(0, a + 28 * stride);
    const int16x8_t l29 = vec_vsx_ld(0, a + 29 * stride);
    const int16x8_t l30 = vec_vsx_ld(0, a + 30 * stride);
    const int16x8_t l31 = vec_vsx_ld(0, a + 31 * stride);

    b[16] = vec_sl(l16, vec_dct_scale_log2);
    b[17] = vec_sl(l17, vec_dct_scale_log2);
    b[18] = vec_sl(l18, vec_dct_scale_log2);
    b[19] = vec_sl(l19, vec_dct_scale_log2);
    b[20] = vec_sl(l20, vec_dct_scale_log2);
    b[21] = vec_sl(l21, vec_dct_scale_log2);
    b[22] = vec_sl(l22, vec_dct_scale_log2);
    b[23] = vec_sl(l23, vec_dct_scale_log2);

    b[24] = vec_sl(l24, vec_dct_scale_log2);
    b[25] = vec_sl(l25, vec_dct_scale_log2);
    b[26] = vec_sl(l26, vec_dct_scale_log2);
    b[27] = vec_sl(l27, vec_dct_scale_log2);
    b[28] = vec_sl(l28, vec_dct_scale_log2);
    b[29] = vec_sl(l29, vec_dct_scale_log2);
    b[30] = vec_sl(l30, vec_dct_scale_log2);
    b[31] = vec_sl(l31, vec_dct_scale_log2);
  }
}
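
// Note: vec_dct_scale_log2 shifts each loaded sample left by 2 (a multiply by
// 4), matching the input up-scaling of the scalar 32x32 forward transform;
// sub_round_shift() and add_round_shift_s16() below apply the corresponding
// rounded right shifts by 2 after the first pass and early in the second.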

static INLINE void store(tran_low_t *a, const int16x8_t *b) {
  vec_vsx_st(b[0], 0, a);
  vec_vsx_st(b[8], 0, a + 8);
  vec_vsx_st(b[16], 0, a + 16);
  vec_vsx_st(b[24], 0, a + 24);

  vec_vsx_st(b[1], 0, a + 32);
  vec_vsx_st(b[9], 0, a + 40);
  vec_vsx_st(b[17], 0, a + 48);
  vec_vsx_st(b[25], 0, a + 56);

  vec_vsx_st(b[2], 0, a + 64);
  vec_vsx_st(b[10], 0, a + 72);
  vec_vsx_st(b[18], 0, a + 80);
  vec_vsx_st(b[26], 0, a + 88);

  vec_vsx_st(b[3], 0, a + 96);
  vec_vsx_st(b[11], 0, a + 104);
  vec_vsx_st(b[19], 0, a + 112);
  vec_vsx_st(b[27], 0, a + 120);

  vec_vsx_st(b[4], 0, a + 128);
  vec_vsx_st(b[12], 0, a + 136);
  vec_vsx_st(b[20], 0, a + 144);
  vec_vsx_st(b[28], 0, a + 152);

  vec_vsx_st(b[5], 0, a + 160);
  vec_vsx_st(b[13], 0, a + 168);
  vec_vsx_st(b[21], 0, a + 176);
  vec_vsx_st(b[29], 0, a + 184);

  vec_vsx_st(b[6], 0, a + 192);
  vec_vsx_st(b[14], 0, a + 200);
  vec_vsx_st(b[22], 0, a + 208);
  vec_vsx_st(b[30], 0, a + 216);

  vec_vsx_st(b[7], 0, a + 224);
  vec_vsx_st(b[15], 0, a + 232);
  vec_vsx_st(b[23], 0, a + 240);
  vec_vsx_st(b[31], 0, a + 248);
}
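
// Layout note: each store() call writes one 8x32 slice of the output. After
// the final transpose, row r of the slice is spread across b[r], b[r + 8],
// b[r + 16] and b[r + 24], which is why the stores above interleave the four
// groups of eight vectors at 32-element row offsets.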

// Returns 1 if negative, 0 if positive (per 16-bit lane).
static INLINE int16x8_t vec_sign_s16(int16x8_t a) {
  return vec_sr(a, vec_shift_sign_s16);
}

// Add 2 if positive, 1 if negative, and shift by 2.
static INLINE int16x8_t sub_round_shift(const int16x8_t a) {
  const int16x8_t sign = vec_sign_s16(a);
  return vec_sra(vec_sub(vec_add(a, vec_twos_s16), sign), vec_dct_scale_log2);
}

// Add 1 if positive, 2 if negative, and shift by 2.
// In practice, add 1, then add the sign bit, then shift without rounding.
static INLINE int16x8_t add_round_shift_s16(const int16x8_t a) {
  const int16x8_t sign = vec_sign_s16(a);
  return vec_sra(vec_add(vec_add(a, vec_ones_s16), sign), vec_dct_scale_log2);
}
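
// Worked example for the two helpers above (shifts are arithmetic, per lane):
//   sub_round_shift(5)      == (5 + 2) >> 2  == 1
//   sub_round_shift(-5)     == (-5 + 1) >> 2 == -1
//   add_round_shift_s16(5)  == (5 + 1) >> 2  == 1
//   add_round_shift_s16(-5) == (-5 + 2) >> 2 == -1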

static void fdct32_vsx(const int16x8_t *in, int16x8_t *out, int pass) {
  int16x8_t temp0[32];  // Hold stages: 1, 4, 7
  int16x8_t temp1[32];  // Hold stages: 2, 5
  int16x8_t temp2[32];  // Hold stages: 3, 6
  int i;

  // Stage 1
  // Unrolling this loop actually slows down Power9 benchmarks.
  for (i = 0; i < 16; i++) {
    temp0[i] = vec_add(in[i], in[31 - i]);
    // pass through to stage 3.
    temp1[i + 16] = vec_sub(in[15 - i], in[i + 16]);
  }
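
  // Stage 1 pairs the mirrored inputs: temp0[i] holds in[i] + in[31 - i] for
  // the first 16 outputs, while temp1[16 + i] holds in[15 - i] - in[16 + i]
  // (so temp1[31] is in[0] - in[31]); those differences skip stage 2 and feed
  // the stage 3 butterflies directly.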

  // Stage 2
  // Unrolling this loop actually slows down Power9 benchmarks.
  for (i = 0; i < 8; i++) {
    temp1[i] = vec_add(temp0[i], temp0[15 - i]);
    temp1[i + 8] = vec_sub(temp0[7 - i], temp0[i + 8]);
  }

  // Apply butterflies (in place) on pass through to stage 3.
  single_butterfly(temp1[27], temp1[20], &temp1[27], &temp1[20]);
  single_butterfly(temp1[26], temp1[21], &temp1[26], &temp1[21]);
  single_butterfly(temp1[25], temp1[22], &temp1[25], &temp1[22]);
  single_butterfly(temp1[24], temp1[23], &temp1[24], &temp1[23]);

  // Scale the magnitude down by 4 so the intermediate values stay within
  // the range of 16 bits.
  if (pass) {
    temp1[0] = add_round_shift_s16(temp1[0]);
    temp1[1] = add_round_shift_s16(temp1[1]);
    temp1[2] = add_round_shift_s16(temp1[2]);
    temp1[3] = add_round_shift_s16(temp1[3]);
    temp1[4] = add_round_shift_s16(temp1[4]);
    temp1[5] = add_round_shift_s16(temp1[5]);
    temp1[6] = add_round_shift_s16(temp1[6]);
    temp1[7] = add_round_shift_s16(temp1[7]);
    temp1[8] = add_round_shift_s16(temp1[8]);
    temp1[9] = add_round_shift_s16(temp1[9]);
    temp1[10] = add_round_shift_s16(temp1[10]);
    temp1[11] = add_round_shift_s16(temp1[11]);
    temp1[12] = add_round_shift_s16(temp1[12]);
    temp1[13] = add_round_shift_s16(temp1[13]);
    temp1[14] = add_round_shift_s16(temp1[14]);
    temp1[15] = add_round_shift_s16(temp1[15]);

    temp1[16] = add_round_shift_s16(temp1[16]);
    temp1[17] = add_round_shift_s16(temp1[17]);
    temp1[18] = add_round_shift_s16(temp1[18]);
    temp1[19] = add_round_shift_s16(temp1[19]);
    temp1[20] = add_round_shift_s16(temp1[20]);
    temp1[21] = add_round_shift_s16(temp1[21]);
    temp1[22] = add_round_shift_s16(temp1[22]);
    temp1[23] = add_round_shift_s16(temp1[23]);
    temp1[24] = add_round_shift_s16(temp1[24]);
    temp1[25] = add_round_shift_s16(temp1[25]);
    temp1[26] = add_round_shift_s16(temp1[26]);
    temp1[27] = add_round_shift_s16(temp1[27]);
    temp1[28] = add_round_shift_s16(temp1[28]);
    temp1[29] = add_round_shift_s16(temp1[29]);
    temp1[30] = add_round_shift_s16(temp1[30]);
    temp1[31] = add_round_shift_s16(temp1[31]);
  }

  // Stage 3
  temp2[0] = vec_add(temp1[0], temp1[7]);
  temp2[1] = vec_add(temp1[1], temp1[6]);
  temp2[2] = vec_add(temp1[2], temp1[5]);
  temp2[3] = vec_add(temp1[3], temp1[4]);
  temp2[5] = vec_sub(temp1[2], temp1[5]);
  temp2[6] = vec_sub(temp1[1], temp1[6]);
  temp2[8] = temp1[8];
  temp2[9] = temp1[9];

  single_butterfly(temp1[13], temp1[10], &temp2[13], &temp2[10]);
  single_butterfly(temp1[12], temp1[11], &temp2[12], &temp2[11]);
  temp2[14] = temp1[14];
  temp2[15] = temp1[15];

  temp2[18] = vec_add(temp1[18], temp1[21]);
  temp2[19] = vec_add(temp1[19], temp1[20]);

  temp2[20] = vec_sub(temp1[19], temp1[20]);
  temp2[21] = vec_sub(temp1[18], temp1[21]);

  temp2[26] = vec_sub(temp1[29], temp1[26]);
  temp2[27] = vec_sub(temp1[28], temp1[27]);

  temp2[28] = vec_add(temp1[28], temp1[27]);
  temp2[29] = vec_add(temp1[29], temp1[26]);

  // Pass through Stage 4
  temp0[7] = vec_sub(temp1[0], temp1[7]);
  temp0[4] = vec_sub(temp1[3], temp1[4]);
  temp0[16] = vec_add(temp1[16], temp1[23]);
  temp0[17] = vec_add(temp1[17], temp1[22]);
  temp0[22] = vec_sub(temp1[17], temp1[22]);
  temp0[23] = vec_sub(temp1[16], temp1[23]);
  temp0[24] = vec_sub(temp1[31], temp1[24]);
  temp0[25] = vec_sub(temp1[30], temp1[25]);
  temp0[30] = vec_add(temp1[30], temp1[25]);
  temp0[31] = vec_add(temp1[31], temp1[24]);

  // Stage 4
  temp0[0] = vec_add(temp2[0], temp2[3]);
  temp0[1] = vec_add(temp2[1], temp2[2]);
  temp0[2] = vec_sub(temp2[1], temp2[2]);
  temp0[3] = vec_sub(temp2[0], temp2[3]);
  single_butterfly(temp2[6], temp2[5], &temp0[6], &temp0[5]);

  temp0[9] = vec_add(temp2[9], temp2[10]);
  temp0[10] = vec_sub(temp2[9], temp2[10]);
  temp0[13] = vec_sub(temp2[14], temp2[13]);
  temp0[14] = vec_add(temp2[14], temp2[13]);

  double_butterfly(temp2[29], cospi8_v, temp2[18], cospi24_v, &temp0[29],
                   &temp0[18]);
  double_butterfly(temp2[28], cospi8_v, temp2[19], cospi24_v, &temp0[28],
                   &temp0[19]);
  double_butterfly(temp2[27], cospi24_v, temp2[20], cospi8m_v, &temp0[27],
                   &temp0[20]);
  double_butterfly(temp2[26], cospi24_v, temp2[21], cospi8m_v, &temp0[26],
                   &temp0[21]);

  // Pass through Stage 5
  temp1[8] = vec_add(temp2[8], temp2[11]);
  temp1[11] = vec_sub(temp2[8], temp2[11]);
  temp1[12] = vec_sub(temp2[15], temp2[12]);
  temp1[15] = vec_add(temp2[15], temp2[12]);

  // Stage 5
  // 0 and 1 pass through to 0 and 16 at the end
  single_butterfly(temp0[0], temp0[1], &out[0], &out[16]);

  // 2 and 3 pass through to 8 and 24 at the end
  double_butterfly(temp0[3], cospi8_v, temp0[2], cospi24_v, &out[8], &out[24]);

  temp1[4] = vec_add(temp0[4], temp0[5]);
  temp1[5] = vec_sub(temp0[4], temp0[5]);
  temp1[6] = vec_sub(temp0[7], temp0[6]);
  temp1[7] = vec_add(temp0[7], temp0[6]);

  double_butterfly(temp0[14], cospi8_v, temp0[9], cospi24_v, &temp1[14],
                   &temp1[9]);
  double_butterfly(temp0[13], cospi24_v, temp0[10], cospi8m_v, &temp1[13],
                   &temp1[10]);

  temp1[17] = vec_add(temp0[17], temp0[18]);
  temp1[18] = vec_sub(temp0[17], temp0[18]);

  temp1[21] = vec_sub(temp0[22], temp0[21]);
  temp1[22] = vec_add(temp0[22], temp0[21]);

  temp1[25] = vec_add(temp0[25], temp0[26]);
  temp1[26] = vec_sub(temp0[25], temp0[26]);

  temp1[29] = vec_sub(temp0[30], temp0[29]);
  temp1[30] = vec_add(temp0[30], temp0[29]);

  // Pass through Stage 6
  temp2[16] = vec_add(temp0[16], temp0[19]);
  temp2[19] = vec_sub(temp0[16], temp0[19]);
  temp2[20] = vec_sub(temp0[23], temp0[20]);
  temp2[23] = vec_add(temp0[23], temp0[20]);
  temp2[24] = vec_add(temp0[24], temp0[27]);
  temp2[27] = vec_sub(temp0[24], temp0[27]);
  temp2[28] = vec_sub(temp0[31], temp0[28]);
  temp2[31] = vec_add(temp0[31], temp0[28]);

  // Stage 6
  // 4 and 7 pass through to 4 and 28 at the end
  double_butterfly(temp1[7], cospi4_v, temp1[4], cospi28_v, &out[4], &out[28]);
  // 5 and 6 pass through to 20 and 12 at the end
  double_butterfly(temp1[6], cospi20_v, temp1[5], cospi12_v, &out[20],
                   &out[12]);
  temp2[8] = vec_add(temp1[8], temp1[9]);
  temp2[9] = vec_sub(temp1[8], temp1[9]);
  temp2[10] = vec_sub(temp1[11], temp1[10]);
  temp2[11] = vec_add(temp1[11], temp1[10]);
  temp2[12] = vec_add(temp1[12], temp1[13]);
  temp2[13] = vec_sub(temp1[12], temp1[13]);
  temp2[14] = vec_sub(temp1[15], temp1[14]);
  temp2[15] = vec_add(temp1[15], temp1[14]);

  double_butterfly(temp1[30], cospi4_v, temp1[17], cospi28_v, &temp2[30],
                   &temp2[17]);
  double_butterfly(temp1[29], cospi28_v, temp1[18], cospi4m_v, &temp2[29],
                   &temp2[18]);
  double_butterfly(temp1[26], cospi20_v, temp1[21], cospi12_v, &temp2[26],
                   &temp2[21]);
  double_butterfly(temp1[25], cospi12_v, temp1[22], cospi20m_v, &temp2[25],
                   &temp2[22]);

  // Stage 7
  double_butterfly(temp2[15], cospi2_v, temp2[8], cospi30_v, &out[2], &out[30]);
  double_butterfly(temp2[14], cospi18_v, temp2[9], cospi14_v, &out[18],
                   &out[14]);
  double_butterfly(temp2[13], cospi10_v, temp2[10], cospi22_v, &out[10],
                   &out[22]);
  double_butterfly(temp2[12], cospi26_v, temp2[11], cospi6_v, &out[26],
                   &out[6]);

  temp0[16] = vec_add(temp2[16], temp2[17]);
  temp0[17] = vec_sub(temp2[16], temp2[17]);
  temp0[18] = vec_sub(temp2[19], temp2[18]);
  temp0[19] = vec_add(temp2[19], temp2[18]);
  temp0[20] = vec_add(temp2[20], temp2[21]);
  temp0[21] = vec_sub(temp2[20], temp2[21]);
  temp0[22] = vec_sub(temp2[23], temp2[22]);
  temp0[23] = vec_add(temp2[23], temp2[22]);
  temp0[24] = vec_add(temp2[24], temp2[25]);
  temp0[25] = vec_sub(temp2[24], temp2[25]);
  temp0[26] = vec_sub(temp2[27], temp2[26]);
  temp0[27] = vec_add(temp2[27], temp2[26]);
  temp0[28] = vec_add(temp2[28], temp2[29]);
  temp0[29] = vec_sub(temp2[28], temp2[29]);
  temp0[30] = vec_sub(temp2[31], temp2[30]);
  temp0[31] = vec_add(temp2[31], temp2[30]);

  // Final stage: output indices are bit-reversed.
  double_butterfly(temp0[31], cospi1_v, temp0[16], cospi31_v, &out[1],
                   &out[31]);
  double_butterfly(temp0[30], cospi17_v, temp0[17], cospi15_v, &out[17],
                   &out[15]);
  double_butterfly(temp0[29], cospi9_v, temp0[18], cospi23_v, &out[9],
                   &out[23]);
  double_butterfly(temp0[28], cospi25_v, temp0[19], cospi7_v, &out[25],
                   &out[7]);
  double_butterfly(temp0[27], cospi5_v, temp0[20], cospi27_v, &out[5],
                   &out[27]);
  double_butterfly(temp0[26], cospi21_v, temp0[21], cospi11_v, &out[21],
                   &out[11]);
  double_butterfly(temp0[25], cospi13_v, temp0[22], cospi19_v, &out[13],
                   &out[19]);
  double_butterfly(temp0[24], cospi29_v, temp0[23], cospi3_v, &out[29],
                   &out[3]);

  if (pass == 0) {
    for (i = 0; i < 32; i++) {
      out[i] = sub_round_shift(out[i]);
    }
  }
}

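// Usage sketch (illustrative only; the buffer names are hypothetical):
//   int16_t residual[32 * 32];         // 32x32 input block, row stride 32
//   tran_low_t coefficients[32 * 32];  // 32x32 output coefficients
//   vpx_fdct32x32_rd_vsx(residual, coefficients, 32);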
void vpx_fdct32x32_rd_vsx(const int16_t *input, tran_low_t *out, int stride) {
  int16x8_t temp0[32];
  int16x8_t temp1[32];
  int16x8_t temp2[32];
  int16x8_t temp3[32];
  int16x8_t temp4[32];
  int16x8_t temp5[32];
  int16x8_t temp6[32];

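  // The transform is computed in two passes: the first pass runs fdct32_vsx()
  // on four 8x32 column slices of the input (results in temp1..temp4); each
  // 8x32 row block for the second pass is then assembled by transposing 8x8
  // tiles from those results, transformed again, transposed back and stored.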
  // Process in 8x32 columns.
  load(input, stride, temp0);
  fdct32_vsx(temp0, temp1, 0);

  load(input + 8, stride, temp0);
  fdct32_vsx(temp0, temp2, 0);

  load(input + 16, stride, temp0);
  fdct32_vsx(temp0, temp3, 0);

  load(input + 24, stride, temp0);
  fdct32_vsx(temp0, temp4, 0);

  // Generate the top row of 8x32 by transposing together the first set of
  // eight vectors from each column transform.
  transpose_8x8(&temp1[0], &temp0[0]);
  transpose_8x8(&temp2[0], &temp0[8]);
  transpose_8x8(&temp3[0], &temp0[16]);
  transpose_8x8(&temp4[0], &temp0[24]);

  fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out, temp6);

  // Second row of 8x32.
  transpose_8x8(&temp1[8], &temp0[0]);
  transpose_8x8(&temp2[8], &temp0[8]);
  transpose_8x8(&temp3[8], &temp0[16]);
  transpose_8x8(&temp4[8], &temp0[24]);

  fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 8 * 32, temp6);

  // Third row of 8x32.
  transpose_8x8(&temp1[16], &temp0[0]);
  transpose_8x8(&temp2[16], &temp0[8]);
  transpose_8x8(&temp3[16], &temp0[16]);
  transpose_8x8(&temp4[16], &temp0[24]);

  fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 16 * 32, temp6);

  // Final row of 8x32.
  transpose_8x8(&temp1[24], &temp0[0]);
  transpose_8x8(&temp2[24], &temp0[8]);
  transpose_8x8(&temp3[24], &temp0[16]);
  transpose_8x8(&temp4[24], &temp0[24]);

  fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 24 * 32, temp6);
}