/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <tmmintrin.h>

#include "vpx_dsp_rtcd.h"
//#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "inv_txfm_ssse3.h"
//#include "vpx_dsp/x86/transpose_sse2.h"
//#include "vpx_dsp/x86/txfm_common_sse2.h"

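// _mm_mulhrs_epi16(a, b) computes round(a * b / 2^15) per 16-bit lane, so
// multiplying by a pre-doubled constant 2 * c approximates the C reference's
// round(in * c / 2^14) (dct_const_round_shift with DCT_CONST_BITS == 14).
// A "partial" butterfly is the case where the second butterfly input is a
// zero coefficient, leaving just these two multiplications.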
static INLINE void partial_butterfly_ssse3(const __m128i in, const int c0,
                                           const int c1, __m128i *const out0,
                                           __m128i *const out1) {
  const __m128i cst0 = _mm_set1_epi16(2 * c0);
  const __m128i cst1 = _mm_set1_epi16(2 * c1);
  *out0 = _mm_mulhrs_epi16(in, cst0);
  *out1 = _mm_mulhrs_epi16(in, cst1);
}

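// Partial butterfly specialized for c0 == c1 == cospi_16_64: both outputs
// would be identical, so a single product is enough.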
static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
  const __m128i coef_pair = _mm_set1_epi16(2 * cospi_16_64);
  return _mm_mulhrs_epi16(in, coef_pair);
}

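// 8x8 inverse transform for blocks whose first 12 coefficients (in scan
// order, all inside the top-left 4x4 corner) are the only ones that can be
// nonzero: four rows of four coefficients are loaded and the rest are
// treated as zero. load_input_data4(), idct8x8_12_add_kernel_ssse3() and
// write_buffer_8x8() are helpers provided by the included inverse-transform
// headers.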
void eb_vp9_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_ssse3(io);
  write_buffer_8x8(io, dest, stride);
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
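//
// The _34 variants below assume at most 34 nonzero coefficients (confined to
// the top-left 8x8 corner of the 32x32 block) and the _135 variants at most
// 135 (top-left 16x16 corner); inputs outside those corners are treated as
// zero, which is what lets whole butterfly stages collapse into copies or
// partial butterflies.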

// For each 8x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

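  // Only in[0] and in[4] contribute here; every butterfly whose other
  // operand would be a zero coefficient is folded into a partial butterfly
  // or a plain copy below.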
  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

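// Combines quarter 1 (outputs 0-7) and quarter 2 (outputs 8-15); stage 7 then
// pairs temp[i] with temp[15 - i] (sums into out[i], differences into
// out[15 - i]) to produce outputs 0-15.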
static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index, 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);
  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

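// 32-point IDCT over one 8-lane-wide block for the at-most-34-coefficient
// case; the 32x32 add wrapper presumably transposes the input and calls this
// once per pass (columns, then rows).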
void eb_vp9_idct32_34_8x32_ssse3(const __m128i *const in /*in[32]*/,
                                 __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

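// In the _135 path only in[0]..in[15] of each 8x32 block can be nonzero
// (coefficients from the top-left 16x16 corner), so every stage-1/2 butterfly
// still has one zero operand and reduces to a partial butterfly, but all
// sixteen low-index inputs now contribute.
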
// For each 8x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  partial_butterfly_ssse3(in[12], -cospi_20_64, cospi_12_64, &step1[5],
                          &step1[6]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  partial_butterfly_ssse3(in[8], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[0], step2[2]);
  step1[2] = _mm_sub_epi16(step2[0], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[14], -cospi_18_64, cospi_14_64, &step2[9],
                          &step2[14]);
  partial_butterfly_ssse3(in[10], cospi_22_64, cospi_10_64, &step2[10],
                          &step2[13]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[11], step2[10]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[15], step2[14]);

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_135_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_135_8x32_quarter_1(in, temp);
  idct32_135_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[15], -cospi_17_64, cospi_15_64, &step1[17],
                          &step1[30]);
  partial_butterfly_ssse3(in[9], cospi_23_64, cospi_9_64, &step1[18],
                          &step1[29]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);

  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[11], -cospi_21_64, cospi_11_64, &step1[21],
                          &step1[26]);

  partial_butterfly_ssse3(in[13], cospi_19_64, cospi_13_64, &step1[22],
                          &step1[25]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi16(step1[16], step1[17]);
  step2[17] = _mm_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm_add_epi16(step1[19], step1[18]);
  step2[20] = _mm_add_epi16(step1[20], step1[21]);
  step2[21] = _mm_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm_add_epi16(step1[23], step1[22]);

  step2[24] = _mm_add_epi16(step1[24], step1[25]);
  step2[25] = _mm_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm_add_epi16(step1[27], step1[26]);
  step2[28] = _mm_add_epi16(step1[28], step1[29]);
  step2[29] = _mm_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

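// Counterpart of eb_vp9_idct32_34_8x32_ssse3 for the at-most-135-coefficient
// case.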
void eb_vp9_idct32_135_8x32_ssse3(const __m128i *const in /*in[32]*/,
                                  __m128i *const out /*out[32]*/) {
  __m128i temp[32];
  idct32_135_8x32_quarter_1_2(in, temp);
  idct32_135_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}