/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <assert.h>
#include <smmintrin.h> /* SSE4.1 */

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/av1_txfm.h"
#include "av1/common/x86/highbd_txfm_utility_sse4.h"
#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
#include "av1/encoder/x86/av1_txfm1d_sse4.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
#include "aom_ports/mem.h"

static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
                                   int stride, int flipud, int fliplr,
                                   int shift) {
  if (!flipud) {
    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
  } else {
    in[0] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
    in[1] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
    in[2] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
    in[3] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
  }

  if (fliplr) {
    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
  }

  in[0] = _mm_cvtepi16_epi32(in[0]);
  in[1] = _mm_cvtepi16_epi32(in[1]);
  in[2] = _mm_cvtepi16_epi32(in[2]);
  in[3] = _mm_cvtepi16_epi32(in[3]);

  in[0] = _mm_slli_epi32(in[0], shift);
  in[1] = _mm_slli_epi32(in[1], shift);
  in[2] = _mm_slli_epi32(in[2], shift);
  in[3] = _mm_slli_epi32(in[3], shift);
}

// Note: only shift[0] needs to be applied explicitly, in load_buffer_4x4();
// shift[1] (after the column transform) and shift[2] (after the row
// transform) are both zero for TX_4X4, as noted inside fdct4x4_sse4_1().
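// All butterflies in this file share one fixed-point pattern: cospi_arr(bit)
// returns cos(i * PI / 64) scaled by (1 << bit), and each butterfly output is
// round_shift(x * cospi[a] +/- y * cospi[b], bit), where
// round_shift(v, bit) = (v + (1 << (bit - 1))) >> bit. In SSE4.1 terms this
// is _mm_mullo_epi32, then _mm_add_epi32 with the rnding constant, then
// _mm_srai_epi32 by bit.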
static void fdct4x4_sse4_1(__m128i *in, int bit) {
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  __m128i s0, s1, s2, s3;
  __m128i u0, u1, u2, u3;
  __m128i v0, v1, v2, v3;

  s0 = _mm_add_epi32(in[0], in[3]);
  s1 = _mm_add_epi32(in[1], in[2]);
  s2 = _mm_sub_epi32(in[1], in[2]);
  s3 = _mm_sub_epi32(in[0], in[3]);

  // btf_32_sse4_1_type0(cospi32, cospi32, s[01], u[02], bit);
  u0 = _mm_mullo_epi32(s0, cospi32);
  u1 = _mm_mullo_epi32(s1, cospi32);
  u2 = _mm_add_epi32(u0, u1);
  v0 = _mm_sub_epi32(u0, u1);

  u3 = _mm_add_epi32(u2, rnding);
  v1 = _mm_add_epi32(v0, rnding);

  u0 = _mm_srai_epi32(u3, bit);
  u2 = _mm_srai_epi32(v1, bit);

  // btf_32_sse4_1_type1(cospi48, cospi16, s[23], u[13], bit);
  v0 = _mm_mullo_epi32(s2, cospi48);
  v1 = _mm_mullo_epi32(s3, cospi16);
  v2 = _mm_add_epi32(v0, v1);

  v3 = _mm_add_epi32(v2, rnding);
  u1 = _mm_srai_epi32(v3, bit);

  v0 = _mm_mullo_epi32(s2, cospi16);
  v1 = _mm_mullo_epi32(s3, cospi48);
  v2 = _mm_sub_epi32(v1, v0);

  v3 = _mm_add_epi32(v2, rnding);
  u3 = _mm_srai_epi32(v3, bit);

  // Note: shift[1] and shift[2] are zeros

  // Transpose 4x4 32-bit
  v0 = _mm_unpacklo_epi32(u0, u1);
  v1 = _mm_unpackhi_epi32(u0, u1);
  v2 = _mm_unpacklo_epi32(u2, u3);
  v3 = _mm_unpackhi_epi32(u2, u3);

  in[0] = _mm_unpacklo_epi64(v0, v2);
  in[1] = _mm_unpackhi_epi64(v0, v2);
  in[2] = _mm_unpacklo_epi64(v1, v3);
  in[3] = _mm_unpackhi_epi64(v1, v3);
}

static INLINE void write_buffer_4x4(__m128i *res, int32_t *output) {
  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
}

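// The 4-point forward ADST below is built on sinpi_arr(bit), i.e.
// sin(k * PI / 9) scaled by (1 << bit) for k = 1..4, rather than the cospi
// table used by the DCT path; it ends with the same 4x4 transpose.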
static void fadst4x4_sse4_1(__m128i *in, int bit) {
  const int32_t *sinpi = sinpi_arr(bit);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  const __m128i sinpi1 = _mm_set1_epi32((int)sinpi[1]);
  const __m128i sinpi2 = _mm_set1_epi32((int)sinpi[2]);
  const __m128i sinpi3 = _mm_set1_epi32((int)sinpi[3]);
  const __m128i sinpi4 = _mm_set1_epi32((int)sinpi[4]);
  __m128i t;
  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
  __m128i x0, x1, x2, x3;
  __m128i u0, u1, u2, u3;
  __m128i v0, v1, v2, v3;

  s0 = _mm_mullo_epi32(in[0], sinpi1);
  s1 = _mm_mullo_epi32(in[0], sinpi4);
  s2 = _mm_mullo_epi32(in[1], sinpi2);
  s3 = _mm_mullo_epi32(in[1], sinpi1);
  s4 = _mm_mullo_epi32(in[2], sinpi3);
  s5 = _mm_mullo_epi32(in[3], sinpi4);
  s6 = _mm_mullo_epi32(in[3], sinpi2);
  t = _mm_add_epi32(in[0], in[1]);
  s7 = _mm_sub_epi32(t, in[3]);

  t = _mm_add_epi32(s0, s2);
  x0 = _mm_add_epi32(t, s5);
  x1 = _mm_mullo_epi32(s7, sinpi3);
  t = _mm_sub_epi32(s1, s3);
  x2 = _mm_add_epi32(t, s6);
  x3 = s4;

  s0 = _mm_add_epi32(x0, x3);
  s1 = x1;
  s2 = _mm_sub_epi32(x2, x3);
  t = _mm_sub_epi32(x2, x0);
  s3 = _mm_add_epi32(t, x3);

  u0 = _mm_add_epi32(s0, rnding);
  u0 = _mm_srai_epi32(u0, bit);

  u1 = _mm_add_epi32(s1, rnding);
  u1 = _mm_srai_epi32(u1, bit);

  u2 = _mm_add_epi32(s2, rnding);
  u2 = _mm_srai_epi32(u2, bit);

  u3 = _mm_add_epi32(s3, rnding);
  u3 = _mm_srai_epi32(u3, bit);

  v0 = _mm_unpacklo_epi32(u0, u1);
  v1 = _mm_unpackhi_epi32(u0, u1);
  v2 = _mm_unpacklo_epi32(u2, u3);
  v3 = _mm_unpackhi_epi32(u2, u3);

  in[0] = _mm_unpacklo_epi64(v0, v2);
  in[1] = _mm_unpackhi_epi64(v0, v2);
  in[2] = _mm_unpacklo_epi64(v1, v3);
  in[3] = _mm_unpackhi_epi64(v1, v3);
}

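// 2-D 4x4 forward transform: load (+ shift[0]) -> column 1-D transform ->
// row 1-D transform -> store. Each 1-D routine above ends with a 4x4
// transpose, so calling it twice performs the row pass with no separate
// transpose step, and no rounding is needed in between (shift[1] and
// shift[2] are zero for TX_4X4).
//
// A minimal usage sketch (hypothetical buffers, not part of this file; bd is
// unused in this path, see the (void)bd below):
//
//   int16_t src[4 * 4];  // residual block, row-major, stride 4
//   int32_t dst[4 * 4];  // output transform coefficients
//   av1_fwd_txfm2d_4x4_sse4_1(src, dst, 4, DCT_DCT, 10);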
void av1_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
                               int input_stride, TX_TYPE tx_type, int bd) {
  __m128i in[4];
  const int8_t *shift = fwd_txfm_shift_ls[TX_4X4];
  const int txw_idx = get_txw_idx(TX_4X4);
  const int txh_idx = get_txh_idx(TX_4X4);

  switch (tx_type) {
    case DCT_DCT:
      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
      fdct4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fdct4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case ADST_DCT:
      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fdct4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case DCT_ADST:
      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
      fdct4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case ADST_ADST:
      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case FLIPADST_DCT:
      load_buffer_4x4(input, in, input_stride, 1, 0, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fdct4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case DCT_FLIPADST:
      load_buffer_4x4(input, in, input_stride, 0, 1, shift[0]);
      fdct4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case FLIPADST_FLIPADST:
      load_buffer_4x4(input, in, input_stride, 1, 1, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case ADST_FLIPADST:
      load_buffer_4x4(input, in, input_stride, 0, 1, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    case FLIPADST_ADST:
      load_buffer_4x4(input, in, input_stride, 1, 0, shift[0]);
      fadst4x4_sse4_1(in, fwd_cos_bit_col[txw_idx][txh_idx]);
      fadst4x4_sse4_1(in, fwd_cos_bit_row[txw_idx][txh_idx]);
      write_buffer_4x4(in, coeff);
      break;
    default: assert(0);
  }
  (void)bd;
}

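// load_buffer_8x8() produces sixteen registers of int32: each 8-wide row of
// int16 input is sign-extended and split across two __m128i (in[2 * r] holds
// columns 0-3 of row r, in[2 * r + 1] holds columns 4-7). Rows 4-7 are
// widened first (into in[8]..in[15]), then rows 3 down to 0, so the int16 ->
// int32 expansion works in place; everything is then pre-scaled by shift.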
static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
                                   int stride, int flipud, int fliplr,
                                   int shift) {
  __m128i u;
  if (!flipud) {
    in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
    in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
    in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
    in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
    in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
    in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
    in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
    in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
  } else {
    in[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
    in[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
    in[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
    in[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
    in[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
    in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
    in[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
    in[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
  }

  if (fliplr) {
    in[0] = mm_reverse_epi16(in[0]);
    in[1] = mm_reverse_epi16(in[1]);
    in[2] = mm_reverse_epi16(in[2]);
    in[3] = mm_reverse_epi16(in[3]);
    in[4] = mm_reverse_epi16(in[4]);
    in[5] = mm_reverse_epi16(in[5]);
    in[6] = mm_reverse_epi16(in[6]);
    in[7] = mm_reverse_epi16(in[7]);
  }

  u = _mm_unpackhi_epi64(in[4], in[4]);
  in[8] = _mm_cvtepi16_epi32(in[4]);
  in[9] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[5], in[5]);
  in[10] = _mm_cvtepi16_epi32(in[5]);
  in[11] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[6], in[6]);
  in[12] = _mm_cvtepi16_epi32(in[6]);
  in[13] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[7], in[7]);
  in[14] = _mm_cvtepi16_epi32(in[7]);
  in[15] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[3], in[3]);
  in[6] = _mm_cvtepi16_epi32(in[3]);
  in[7] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[2], in[2]);
  in[4] = _mm_cvtepi16_epi32(in[2]);
  in[5] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[1], in[1]);
  in[2] = _mm_cvtepi16_epi32(in[1]);
  in[3] = _mm_cvtepi16_epi32(u);

  u = _mm_unpackhi_epi64(in[0], in[0]);
  in[0] = _mm_cvtepi16_epi32(in[0]);
  in[1] = _mm_cvtepi16_epi32(u);

  in[0] = _mm_slli_epi32(in[0], shift);
  in[1] = _mm_slli_epi32(in[1], shift);
  in[2] = _mm_slli_epi32(in[2], shift);
  in[3] = _mm_slli_epi32(in[3], shift);
  in[4] = _mm_slli_epi32(in[4], shift);
  in[5] = _mm_slli_epi32(in[5], shift);
  in[6] = _mm_slli_epi32(in[6], shift);
  in[7] = _mm_slli_epi32(in[7], shift);

  in[8] = _mm_slli_epi32(in[8], shift);
  in[9] = _mm_slli_epi32(in[9], shift);
  in[10] = _mm_slli_epi32(in[10], shift);
  in[11] = _mm_slli_epi32(in[11], shift);
  in[12] = _mm_slli_epi32(in[12], shift);
  in[13] = _mm_slli_epi32(in[13], shift);
  in[14] = _mm_slli_epi32(in[14], shift);
  in[15] = _mm_slli_epi32(in[15], shift);
}

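// col_txfm_8x8_rounding() applies the mid-pipeline rounding between the
// column and row transforms: x -> (x + (1 << (shift - 1))) >> shift for all
// 16 registers. Callers pass -shift[1] because the stage-1 entries in
// fwd_txfm_shift_ls are negative, i.e. they encode a right shift.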
static INLINE void col_txfm_8x8_rounding(__m128i *in, int shift) {
  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));

  in[0] = _mm_add_epi32(in[0], rounding);
  in[1] = _mm_add_epi32(in[1], rounding);
  in[2] = _mm_add_epi32(in[2], rounding);
  in[3] = _mm_add_epi32(in[3], rounding);
  in[4] = _mm_add_epi32(in[4], rounding);
  in[5] = _mm_add_epi32(in[5], rounding);
  in[6] = _mm_add_epi32(in[6], rounding);
  in[7] = _mm_add_epi32(in[7], rounding);
  in[8] = _mm_add_epi32(in[8], rounding);
  in[9] = _mm_add_epi32(in[9], rounding);
  in[10] = _mm_add_epi32(in[10], rounding);
  in[11] = _mm_add_epi32(in[11], rounding);
  in[12] = _mm_add_epi32(in[12], rounding);
  in[13] = _mm_add_epi32(in[13], rounding);
  in[14] = _mm_add_epi32(in[14], rounding);
  in[15] = _mm_add_epi32(in[15], rounding);

  in[0] = _mm_srai_epi32(in[0], shift);
  in[1] = _mm_srai_epi32(in[1], shift);
  in[2] = _mm_srai_epi32(in[2], shift);
  in[3] = _mm_srai_epi32(in[3], shift);
  in[4] = _mm_srai_epi32(in[4], shift);
  in[5] = _mm_srai_epi32(in[5], shift);
  in[6] = _mm_srai_epi32(in[6], shift);
  in[7] = _mm_srai_epi32(in[7], shift);
  in[8] = _mm_srai_epi32(in[8], shift);
  in[9] = _mm_srai_epi32(in[9], shift);
  in[10] = _mm_srai_epi32(in[10], shift);
  in[11] = _mm_srai_epi32(in[11], shift);
  in[12] = _mm_srai_epi32(in[12], shift);
  in[13] = _mm_srai_epi32(in[13], shift);
  in[14] = _mm_srai_epi32(in[14], shift);
  in[15] = _mm_srai_epi32(in[15], shift);
}

static INLINE void write_buffer_8x8(const __m128i *res, int32_t *output) {
  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);

  _mm_store_si128((__m128i *)(output + 4 * 4), res[4]);
  _mm_store_si128((__m128i *)(output + 5 * 4), res[5]);
  _mm_store_si128((__m128i *)(output + 6 * 4), res[6]);
  _mm_store_si128((__m128i *)(output + 7 * 4), res[7]);

  _mm_store_si128((__m128i *)(output + 8 * 4), res[8]);
  _mm_store_si128((__m128i *)(output + 9 * 4), res[9]);
  _mm_store_si128((__m128i *)(output + 10 * 4), res[10]);
  _mm_store_si128((__m128i *)(output + 11 * 4), res[11]);

  _mm_store_si128((__m128i *)(output + 12 * 4), res[12]);
  _mm_store_si128((__m128i *)(output + 13 * 4), res[13]);
  _mm_store_si128((__m128i *)(output + 14 * 4), res[14]);
  _mm_store_si128((__m128i *)(output + 15 * 4), res[15]);
}

static INLINE void write_buffer_16x8(const __m128i *res, int32_t *output,
                                     const int stride) {
  _mm_storeu_si128((__m128i *)(output), res[0]);
  _mm_storeu_si128((__m128i *)(output + 4), res[1]);
  _mm_storeu_si128((__m128i *)(output + stride), res[2]);
  _mm_storeu_si128((__m128i *)(output + stride + 4), res[3]);

  _mm_storeu_si128((__m128i *)(output + (stride * 2)), res[4]);
  _mm_storeu_si128((__m128i *)(output + (stride * 2) + 4), res[5]);
  _mm_storeu_si128((__m128i *)(output + (stride * 3)), res[6]);
  _mm_storeu_si128((__m128i *)(output + (stride * 3) + 4), res[7]);

  _mm_storeu_si128((__m128i *)(output + (stride * 4)), res[8]);
  _mm_storeu_si128((__m128i *)(output + (stride * 4) + 4), res[9]);
  _mm_storeu_si128((__m128i *)(output + (stride * 5)), res[10]);
  _mm_storeu_si128((__m128i *)(output + (stride * 5) + 4), res[11]);

  _mm_storeu_si128((__m128i *)(output + (stride * 6)), res[12]);
  _mm_storeu_si128((__m128i *)(output + (stride * 6) + 4), res[13]);
  _mm_storeu_si128((__m128i *)(output + (stride * 7)), res[14]);
  _mm_storeu_si128((__m128i *)(output + (stride * 7) + 4), res[15]);
}

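// fdct8x8_sse4_1() runs two independent 8-point DCTs, one over the
// even-indexed registers (columns 0-3 of each row) and one over the
// odd-indexed registers (columns 4-7). The butterfly network yields the
// coefficients in bit-reversed frequency order, so the final stores scatter
// them back to natural row order (see the buf0[] annotations).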
static void fdct8x8_sse4_1(__m128i *in, __m128i *out, int bit,
                           const int col_num) {
  (void)(col_num);
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  __m128i u[8], v[8];

  // Even 8 points 0, 2, ..., 14
  // stage 0
  // stage 1
  u[0] = _mm_add_epi32(in[0], in[14]);
  v[7] = _mm_sub_epi32(in[0], in[14]);  // v[7]
  u[1] = _mm_add_epi32(in[2], in[12]);
  u[6] = _mm_sub_epi32(in[2], in[12]);
  u[2] = _mm_add_epi32(in[4], in[10]);
  u[5] = _mm_sub_epi32(in[4], in[10]);
  u[3] = _mm_add_epi32(in[6], in[8]);
  v[4] = _mm_sub_epi32(in[6], in[8]);  // v[4]

  // stage 2
  v[0] = _mm_add_epi32(u[0], u[3]);
  v[3] = _mm_sub_epi32(u[0], u[3]);
  v[1] = _mm_add_epi32(u[1], u[2]);
  v[2] = _mm_sub_epi32(u[1], u[2]);

  v[5] = _mm_mullo_epi32(u[5], cospim32);
  v[6] = _mm_mullo_epi32(u[6], cospi32);
  v[5] = _mm_add_epi32(v[5], v[6]);
  v[5] = _mm_add_epi32(v[5], rnding);
  v[5] = _mm_srai_epi32(v[5], bit);

  u[0] = _mm_mullo_epi32(u[5], cospi32);
  v[6] = _mm_mullo_epi32(u[6], cospim32);
  v[6] = _mm_sub_epi32(u[0], v[6]);
  v[6] = _mm_add_epi32(v[6], rnding);
  v[6] = _mm_srai_epi32(v[6], bit);

  // stage 3
  // type 0
  v[0] = _mm_mullo_epi32(v[0], cospi32);
  v[1] = _mm_mullo_epi32(v[1], cospi32);
  u[0] = _mm_add_epi32(v[0], v[1]);
  u[0] = _mm_add_epi32(u[0], rnding);
  u[0] = _mm_srai_epi32(u[0], bit);

  u[1] = _mm_sub_epi32(v[0], v[1]);
  u[1] = _mm_add_epi32(u[1], rnding);
  u[1] = _mm_srai_epi32(u[1], bit);

  // type 1
  v[0] = _mm_mullo_epi32(v[2], cospi48);
  v[1] = _mm_mullo_epi32(v[3], cospi16);
  u[2] = _mm_add_epi32(v[0], v[1]);
  u[2] = _mm_add_epi32(u[2], rnding);
  u[2] = _mm_srai_epi32(u[2], bit);

  v[0] = _mm_mullo_epi32(v[2], cospi16);
  v[1] = _mm_mullo_epi32(v[3], cospi48);
  u[3] = _mm_sub_epi32(v[1], v[0]);
  u[3] = _mm_add_epi32(u[3], rnding);
  u[3] = _mm_srai_epi32(u[3], bit);

  u[4] = _mm_add_epi32(v[4], v[5]);
  u[5] = _mm_sub_epi32(v[4], v[5]);
  u[6] = _mm_sub_epi32(v[7], v[6]);
  u[7] = _mm_add_epi32(v[7], v[6]);

  // stage 4
  // stage 5
  v[0] = _mm_mullo_epi32(u[4], cospi56);
  v[1] = _mm_mullo_epi32(u[7], cospi8);
  v[0] = _mm_add_epi32(v[0], v[1]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[2] = _mm_srai_epi32(v[0], bit);  // buf0[4]

  v[0] = _mm_mullo_epi32(u[4], cospi8);
  v[1] = _mm_mullo_epi32(u[7], cospi56);
  v[0] = _mm_sub_epi32(v[1], v[0]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[14] = _mm_srai_epi32(v[0], bit);  // buf0[7]

  v[0] = _mm_mullo_epi32(u[5], cospi24);
  v[1] = _mm_mullo_epi32(u[6], cospi40);
  v[0] = _mm_add_epi32(v[0], v[1]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[10] = _mm_srai_epi32(v[0], bit);  // buf0[5]

  v[0] = _mm_mullo_epi32(u[5], cospi40);
  v[1] = _mm_mullo_epi32(u[6], cospi24);
  v[0] = _mm_sub_epi32(v[1], v[0]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[6] = _mm_srai_epi32(v[0], bit);  // buf0[6]

  out[0] = u[0];   // buf0[0]
  out[8] = u[1];   // buf0[1]
  out[4] = u[2];   // buf0[2]
  out[12] = u[3];  // buf0[3]

  // Odd 8 points: 1, 3, ..., 15
  // stage 0
  // stage 1
  u[0] = _mm_add_epi32(in[1], in[15]);
  v[7] = _mm_sub_epi32(in[1], in[15]);  // v[7]
  u[1] = _mm_add_epi32(in[3], in[13]);
  u[6] = _mm_sub_epi32(in[3], in[13]);
  u[2] = _mm_add_epi32(in[5], in[11]);
  u[5] = _mm_sub_epi32(in[5], in[11]);
  u[3] = _mm_add_epi32(in[7], in[9]);
  v[4] = _mm_sub_epi32(in[7], in[9]);  // v[4]

  // stage 2
  v[0] = _mm_add_epi32(u[0], u[3]);
  v[3] = _mm_sub_epi32(u[0], u[3]);
  v[1] = _mm_add_epi32(u[1], u[2]);
  v[2] = _mm_sub_epi32(u[1], u[2]);

  v[5] = _mm_mullo_epi32(u[5], cospim32);
  v[6] = _mm_mullo_epi32(u[6], cospi32);
  v[5] = _mm_add_epi32(v[5], v[6]);
  v[5] = _mm_add_epi32(v[5], rnding);
  v[5] = _mm_srai_epi32(v[5], bit);

  u[0] = _mm_mullo_epi32(u[5], cospi32);
  v[6] = _mm_mullo_epi32(u[6], cospim32);
  v[6] = _mm_sub_epi32(u[0], v[6]);
  v[6] = _mm_add_epi32(v[6], rnding);
  v[6] = _mm_srai_epi32(v[6], bit);

  // stage 3
  // type 0
  v[0] = _mm_mullo_epi32(v[0], cospi32);
  v[1] = _mm_mullo_epi32(v[1], cospi32);
  u[0] = _mm_add_epi32(v[0], v[1]);
  u[0] = _mm_add_epi32(u[0], rnding);
  u[0] = _mm_srai_epi32(u[0], bit);

  u[1] = _mm_sub_epi32(v[0], v[1]);
  u[1] = _mm_add_epi32(u[1], rnding);
  u[1] = _mm_srai_epi32(u[1], bit);

  // type 1
  v[0] = _mm_mullo_epi32(v[2], cospi48);
  v[1] = _mm_mullo_epi32(v[3], cospi16);
  u[2] = _mm_add_epi32(v[0], v[1]);
  u[2] = _mm_add_epi32(u[2], rnding);
  u[2] = _mm_srai_epi32(u[2], bit);

  v[0] = _mm_mullo_epi32(v[2], cospi16);
  v[1] = _mm_mullo_epi32(v[3], cospi48);
  u[3] = _mm_sub_epi32(v[1], v[0]);
  u[3] = _mm_add_epi32(u[3], rnding);
  u[3] = _mm_srai_epi32(u[3], bit);

  u[4] = _mm_add_epi32(v[4], v[5]);
  u[5] = _mm_sub_epi32(v[4], v[5]);
  u[6] = _mm_sub_epi32(v[7], v[6]);
  u[7] = _mm_add_epi32(v[7], v[6]);

  // stage 4
  // stage 5
  v[0] = _mm_mullo_epi32(u[4], cospi56);
  v[1] = _mm_mullo_epi32(u[7], cospi8);
  v[0] = _mm_add_epi32(v[0], v[1]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[3] = _mm_srai_epi32(v[0], bit);  // buf0[4]

  v[0] = _mm_mullo_epi32(u[4], cospi8);
  v[1] = _mm_mullo_epi32(u[7], cospi56);
  v[0] = _mm_sub_epi32(v[1], v[0]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[15] = _mm_srai_epi32(v[0], bit);  // buf0[7]

  v[0] = _mm_mullo_epi32(u[5], cospi24);
  v[1] = _mm_mullo_epi32(u[6], cospi40);
  v[0] = _mm_add_epi32(v[0], v[1]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[11] = _mm_srai_epi32(v[0], bit);  // buf0[5]

  v[0] = _mm_mullo_epi32(u[5], cospi40);
  v[1] = _mm_mullo_epi32(u[6], cospi24);
  v[0] = _mm_sub_epi32(v[1], v[0]);
  v[0] = _mm_add_epi32(v[0], rnding);
  out[7] = _mm_srai_epi32(v[0], bit);  // buf0[6]

  out[1] = u[0];   // buf0[0]
  out[9] = u[1];   // buf0[1]
  out[5] = u[2];   // buf0[2]
  out[13] = u[3];  // buf0[3]
}

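// fadst8x8_sse4_1() applies the same 8-point ADST network to the even and
// odd register columns in a loop; stage 1 negates selected inputs by
// subtracting them from zero, and the remaining stages are cospi
// butterflies.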
static void fadst8x8_sse4_1(__m128i *in, __m128i *out, int bit,
                            const int col_num) {
  (void)(col_num);
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
  const __m128i cospim4 = _mm_set1_epi32(-cospi[4]);
  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
  const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
  const __m128i cospim20 = _mm_set1_epi32(-cospi[20]);
  const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
  const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
  const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
  const __m128i cospim36 = _mm_set1_epi32(-cospi[36]);
  const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
  const __m128i cospim52 = _mm_set1_epi32(-cospi[52]);
  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  const __m128i zero = _mm_setzero_si128();
  __m128i u0, u1, u2, u3, u4, u5, u6, u7;
  __m128i v0, v1, v2, v3, v4, v5, v6, v7;
  __m128i x, y;
  int col;

  // Note:
  // Even columns: registers 0, 2, ..., 14
  // Odd columns: registers 1, 3, ..., 15
  // One even register plus one odd register holds one row (8 coeffs);
  // in total we have 8 rows (8x8).
  for (col = 0; col < 2; ++col) {
    // stage 0
    // stage 1
    u0 = in[2 * 0 + col];
    u1 = _mm_sub_epi32(zero, in[2 * 7 + col]);
    u2 = _mm_sub_epi32(zero, in[2 * 3 + col]);
    u3 = in[2 * 4 + col];
    u4 = _mm_sub_epi32(zero, in[2 * 1 + col]);
    u5 = in[2 * 6 + col];
    u6 = in[2 * 2 + col];
    u7 = _mm_sub_epi32(zero, in[2 * 5 + col]);

    // stage 2
    v0 = u0;
    v1 = u1;

    x = _mm_mullo_epi32(u2, cospi32);
    y = _mm_mullo_epi32(u3, cospi32);
    v2 = _mm_add_epi32(x, y);
    v2 = _mm_add_epi32(v2, rnding);
    v2 = _mm_srai_epi32(v2, bit);

    v3 = _mm_sub_epi32(x, y);
    v3 = _mm_add_epi32(v3, rnding);
    v3 = _mm_srai_epi32(v3, bit);

    v4 = u4;
    v5 = u5;

    x = _mm_mullo_epi32(u6, cospi32);
    y = _mm_mullo_epi32(u7, cospi32);
    v6 = _mm_add_epi32(x, y);
    v6 = _mm_add_epi32(v6, rnding);
    v6 = _mm_srai_epi32(v6, bit);

    v7 = _mm_sub_epi32(x, y);
    v7 = _mm_add_epi32(v7, rnding);
    v7 = _mm_srai_epi32(v7, bit);

    // stage 3
    u0 = _mm_add_epi32(v0, v2);
    u1 = _mm_add_epi32(v1, v3);
    u2 = _mm_sub_epi32(v0, v2);
    u3 = _mm_sub_epi32(v1, v3);
    u4 = _mm_add_epi32(v4, v6);
    u5 = _mm_add_epi32(v5, v7);
    u6 = _mm_sub_epi32(v4, v6);
    u7 = _mm_sub_epi32(v5, v7);

    // stage 4
    v0 = u0;
    v1 = u1;
    v2 = u2;
    v3 = u3;

    x = _mm_mullo_epi32(u4, cospi16);
    y = _mm_mullo_epi32(u5, cospi48);
    v4 = _mm_add_epi32(x, y);
    v4 = _mm_add_epi32(v4, rnding);
    v4 = _mm_srai_epi32(v4, bit);

    x = _mm_mullo_epi32(u4, cospi48);
    y = _mm_mullo_epi32(u5, cospim16);
    v5 = _mm_add_epi32(x, y);
    v5 = _mm_add_epi32(v5, rnding);
    v5 = _mm_srai_epi32(v5, bit);

    x = _mm_mullo_epi32(u6, cospim48);
    y = _mm_mullo_epi32(u7, cospi16);
    v6 = _mm_add_epi32(x, y);
    v6 = _mm_add_epi32(v6, rnding);
    v6 = _mm_srai_epi32(v6, bit);

    x = _mm_mullo_epi32(u6, cospi16);
    y = _mm_mullo_epi32(u7, cospi48);
    v7 = _mm_add_epi32(x, y);
    v7 = _mm_add_epi32(v7, rnding);
    v7 = _mm_srai_epi32(v7, bit);

    // stage 5
    u0 = _mm_add_epi32(v0, v4);
    u1 = _mm_add_epi32(v1, v5);
    u2 = _mm_add_epi32(v2, v6);
    u3 = _mm_add_epi32(v3, v7);
    u4 = _mm_sub_epi32(v0, v4);
    u5 = _mm_sub_epi32(v1, v5);
    u6 = _mm_sub_epi32(v2, v6);
    u7 = _mm_sub_epi32(v3, v7);

    // stage 6
    x = _mm_mullo_epi32(u0, cospi4);
    y = _mm_mullo_epi32(u1, cospi60);
    v0 = _mm_add_epi32(x, y);
    v0 = _mm_add_epi32(v0, rnding);
    v0 = _mm_srai_epi32(v0, bit);

    x = _mm_mullo_epi32(u0, cospi60);
    y = _mm_mullo_epi32(u1, cospim4);
    v1 = _mm_add_epi32(x, y);
    v1 = _mm_add_epi32(v1, rnding);
    v1 = _mm_srai_epi32(v1, bit);

    x = _mm_mullo_epi32(u2, cospi20);
    y = _mm_mullo_epi32(u3, cospi44);
    v2 = _mm_add_epi32(x, y);
    v2 = _mm_add_epi32(v2, rnding);
    v2 = _mm_srai_epi32(v2, bit);

    x = _mm_mullo_epi32(u2, cospi44);
    y = _mm_mullo_epi32(u3, cospim20);
    v3 = _mm_add_epi32(x, y);
    v3 = _mm_add_epi32(v3, rnding);
    v3 = _mm_srai_epi32(v3, bit);

    x = _mm_mullo_epi32(u4, cospi36);
    y = _mm_mullo_epi32(u5, cospi28);
    v4 = _mm_add_epi32(x, y);
    v4 = _mm_add_epi32(v4, rnding);
    v4 = _mm_srai_epi32(v4, bit);

    x = _mm_mullo_epi32(u4, cospi28);
    y = _mm_mullo_epi32(u5, cospim36);
    v5 = _mm_add_epi32(x, y);
    v5 = _mm_add_epi32(v5, rnding);
    v5 = _mm_srai_epi32(v5, bit);

    x = _mm_mullo_epi32(u6, cospi52);
    y = _mm_mullo_epi32(u7, cospi12);
    v6 = _mm_add_epi32(x, y);
    v6 = _mm_add_epi32(v6, rnding);
    v6 = _mm_srai_epi32(v6, bit);

    x = _mm_mullo_epi32(u6, cospi12);
    y = _mm_mullo_epi32(u7, cospim52);
    v7 = _mm_add_epi32(x, y);
    v7 = _mm_add_epi32(v7, rnding);
    v7 = _mm_srai_epi32(v7, bit);

    // stage 7
    out[2 * 0 + col] = v1;
    out[2 * 1 + col] = v6;
    out[2 * 2 + col] = v3;
    out[2 * 3 + col] = v4;
    out[2 * 4 + col] = v5;
    out[2 * 5 + col] = v2;
    out[2 * 6 + col] = v7;
    out[2 * 7 + col] = v0;
  }
}

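// 2-D 8x8 pipeline: load (+ shift[0]) -> column transform -> rounding by
// -shift[1] -> transpose -> row transform -> transpose -> store.
//
// A minimal usage sketch (hypothetical buffers, not part of this file):
//
//   int16_t src[8 * 8];  // residual block, row-major, stride 8
//   int32_t dst[8 * 8];  // output transform coefficients
//   av1_fwd_txfm2d_8x8_sse4_1(src, dst, 8, ADST_DCT, 10);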
void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
                               TX_TYPE tx_type, int bd) {
  __m128i in[16], out[16];
  const int8_t *shift = fwd_txfm_shift_ls[TX_8X8];
  const int txw_idx = get_txw_idx(TX_8X8);
  const int txh_idx = get_txh_idx(TX_8X8);

  switch (tx_type) {
    case DCT_DCT:
      load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case ADST_DCT:
      load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case DCT_ADST:
      load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case ADST_ADST:
      load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case FLIPADST_DCT:
      load_buffer_8x8(input, in, stride, 1, 0, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case DCT_FLIPADST:
      load_buffer_8x8(input, in, stride, 0, 1, shift[0]);
      fdct8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case FLIPADST_FLIPADST:
      load_buffer_8x8(input, in, stride, 1, 1, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case ADST_FLIPADST:
      load_buffer_8x8(input, in, stride, 0, 1, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    case FLIPADST_ADST:
      load_buffer_8x8(input, in, stride, 1, 0, shift[0]);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], 0);
      col_txfm_8x8_rounding(out, -shift[1]);
      transpose_8x8(out, in);
      fadst8x8_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], 0);
      transpose_8x8(out, in);
      write_buffer_8x8(in, coeff);
      break;
    default: assert(0);
  }
  (void)bd;
}

// Hybrid Transform 16x16

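// convert_8x8_to_16x16() repacks four 8x8 sub-blocks (top-left at in[0],
// top-right at in[16], bottom-left at in[32], bottom-right at in[48]) into
// row-major 16x16 order, where each 16-wide row occupies four consecutive
// registers of out[].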
static INLINE void convert_8x8_to_16x16(const __m128i *in, __m128i *out) {
  int row_index = 0;
  int dst_index = 0;
  int src_index = 0;

  // row 0, 1, ..., 7
  do {
    out[dst_index] = in[src_index];
    out[dst_index + 1] = in[src_index + 1];
    out[dst_index + 2] = in[src_index + 16];
    out[dst_index + 3] = in[src_index + 17];
    dst_index += 4;
    src_index += 2;
    row_index += 1;
  } while (row_index < 8);

  // row 8, 9, ..., 15
  src_index += 16;
  do {
    out[dst_index] = in[src_index];
    out[dst_index + 1] = in[src_index + 1];
    out[dst_index + 2] = in[src_index + 16];
    out[dst_index + 3] = in[src_index + 17];
    dst_index += 4;
    src_index += 2;
    row_index += 1;
  } while (row_index < 16);
}

static INLINE void load_buffer_16x16(const int16_t *input, __m128i *out,
                                     int stride, int flipud, int fliplr,
                                     int shift) {
  __m128i in[64];
  // Load 4 8x8 blocks
  const int16_t *topL = input;
  const int16_t *topR = input + 8;
  const int16_t *botL = input + 8 * stride;
  const int16_t *botR = input + 8 * stride + 8;

  const int16_t *tmp;

  if (flipud) {
    // Swap left columns
    tmp = topL;
    topL = botL;
    botL = tmp;
    // Swap right columns
    tmp = topR;
    topR = botR;
    botR = tmp;
  }

  if (fliplr) {
    // Swap top rows
    tmp = topL;
    topL = topR;
    topR = tmp;
    // Swap bottom rows
    tmp = botL;
    botL = botR;
    botR = tmp;
  }

  // load first 8 columns
  load_buffer_8x8(topL, &in[0], stride, flipud, fliplr, shift);
  load_buffer_8x8(botL, &in[32], stride, flipud, fliplr, shift);

  // load second 8 columns
  load_buffer_8x8(topR, &in[16], stride, flipud, fliplr, shift);
  load_buffer_8x8(botR, &in[48], stride, flipud, fliplr, shift);

  convert_8x8_to_16x16(in, out);
}

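// An 8x16 block is loaded as two stacked 8x8 blocks; flipud is handled by
// swapping the two halves here and flipping the rows of each half inside
// load_buffer_8x8().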
static INLINE void load_buffer_8x16(const int16_t *input, __m128i *out,
                                    int stride, int flipud, int fliplr,
                                    int shift) {
  const int16_t *topL = input;
  const int16_t *botL = input + 8 * stride;

  const int16_t *tmp;

  if (flipud) {
    tmp = topL;
    topL = botL;
    botL = tmp;
  }

  load_buffer_8x8(topL, out, stride, flipud, fliplr, shift);
  load_buffer_8x8(botL, out + 16, stride, flipud, fliplr, shift);
}

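// For the 16-point transforms below, col_num (num_cols) is the number of
// 4-lane register columns processed per row: 4 for a full 16-wide block.
// Both in[] and out[] are indexed as row * col_num + col.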
static void fdct16x16_sse4_1(__m128i *in, __m128i *out, int bit,
                             const int col_num) {
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
  const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
  const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
  const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
  const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
  const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  __m128i u[16], v[16], x;
  int col;

  // Process the register columns (0, 1, ..., col_num - 1)
  for (col = 0; col < col_num; ++col) {
    // stage 0
    // stage 1
    u[0] = _mm_add_epi32(in[0 * col_num + col], in[15 * col_num + col]);
    u[15] = _mm_sub_epi32(in[0 * col_num + col], in[15 * col_num + col]);
    u[1] = _mm_add_epi32(in[1 * col_num + col], in[14 * col_num + col]);
    u[14] = _mm_sub_epi32(in[1 * col_num + col], in[14 * col_num + col]);
    u[2] = _mm_add_epi32(in[2 * col_num + col], in[13 * col_num + col]);
    u[13] = _mm_sub_epi32(in[2 * col_num + col], in[13 * col_num + col]);
    u[3] = _mm_add_epi32(in[3 * col_num + col], in[12 * col_num + col]);
    u[12] = _mm_sub_epi32(in[3 * col_num + col], in[12 * col_num + col]);
    u[4] = _mm_add_epi32(in[4 * col_num + col], in[11 * col_num + col]);
    u[11] = _mm_sub_epi32(in[4 * col_num + col], in[11 * col_num + col]);
    u[5] = _mm_add_epi32(in[5 * col_num + col], in[10 * col_num + col]);
    u[10] = _mm_sub_epi32(in[5 * col_num + col], in[10 * col_num + col]);
    u[6] = _mm_add_epi32(in[6 * col_num + col], in[9 * col_num + col]);
    u[9] = _mm_sub_epi32(in[6 * col_num + col], in[9 * col_num + col]);
    u[7] = _mm_add_epi32(in[7 * col_num + col], in[8 * col_num + col]);
    u[8] = _mm_sub_epi32(in[7 * col_num + col], in[8 * col_num + col]);

    // stage 2
    v[0] = _mm_add_epi32(u[0], u[7]);
    v[7] = _mm_sub_epi32(u[0], u[7]);
    v[1] = _mm_add_epi32(u[1], u[6]);
    v[6] = _mm_sub_epi32(u[1], u[6]);
    v[2] = _mm_add_epi32(u[2], u[5]);
    v[5] = _mm_sub_epi32(u[2], u[5]);
    v[3] = _mm_add_epi32(u[3], u[4]);
    v[4] = _mm_sub_epi32(u[3], u[4]);
    v[8] = u[8];
    v[9] = u[9];

    v[10] = _mm_mullo_epi32(u[10], cospim32);
    x = _mm_mullo_epi32(u[13], cospi32);
    v[10] = _mm_add_epi32(v[10], x);
    v[10] = _mm_add_epi32(v[10], rnding);
    v[10] = _mm_srai_epi32(v[10], bit);

    v[13] = _mm_mullo_epi32(u[10], cospi32);
    x = _mm_mullo_epi32(u[13], cospim32);
    v[13] = _mm_sub_epi32(v[13], x);
    v[13] = _mm_add_epi32(v[13], rnding);
    v[13] = _mm_srai_epi32(v[13], bit);

    v[11] = _mm_mullo_epi32(u[11], cospim32);
    x = _mm_mullo_epi32(u[12], cospi32);
    v[11] = _mm_add_epi32(v[11], x);
    v[11] = _mm_add_epi32(v[11], rnding);
    v[11] = _mm_srai_epi32(v[11], bit);

    v[12] = _mm_mullo_epi32(u[11], cospi32);
    x = _mm_mullo_epi32(u[12], cospim32);
    v[12] = _mm_sub_epi32(v[12], x);
    v[12] = _mm_add_epi32(v[12], rnding);
    v[12] = _mm_srai_epi32(v[12], bit);
    v[14] = u[14];
    v[15] = u[15];

    // stage 3
    u[0] = _mm_add_epi32(v[0], v[3]);
    u[3] = _mm_sub_epi32(v[0], v[3]);
    u[1] = _mm_add_epi32(v[1], v[2]);
    u[2] = _mm_sub_epi32(v[1], v[2]);
    u[4] = v[4];

    u[5] = _mm_mullo_epi32(v[5], cospim32);
    x = _mm_mullo_epi32(v[6], cospi32);
    u[5] = _mm_add_epi32(u[5], x);
    u[5] = _mm_add_epi32(u[5], rnding);
    u[5] = _mm_srai_epi32(u[5], bit);

    u[6] = _mm_mullo_epi32(v[5], cospi32);
    x = _mm_mullo_epi32(v[6], cospim32);
    u[6] = _mm_sub_epi32(u[6], x);
    u[6] = _mm_add_epi32(u[6], rnding);
    u[6] = _mm_srai_epi32(u[6], bit);

    u[7] = v[7];
    u[8] = _mm_add_epi32(v[8], v[11]);
    u[11] = _mm_sub_epi32(v[8], v[11]);
    u[9] = _mm_add_epi32(v[9], v[10]);
    u[10] = _mm_sub_epi32(v[9], v[10]);
    u[12] = _mm_sub_epi32(v[15], v[12]);
    u[15] = _mm_add_epi32(v[15], v[12]);
    u[13] = _mm_sub_epi32(v[14], v[13]);
    u[14] = _mm_add_epi32(v[14], v[13]);

    // stage 4
    u[0] = _mm_mullo_epi32(u[0], cospi32);
    u[1] = _mm_mullo_epi32(u[1], cospi32);
    v[0] = _mm_add_epi32(u[0], u[1]);
    v[0] = _mm_add_epi32(v[0], rnding);
    v[0] = _mm_srai_epi32(v[0], bit);

    v[1] = _mm_sub_epi32(u[0], u[1]);
    v[1] = _mm_add_epi32(v[1], rnding);
    v[1] = _mm_srai_epi32(v[1], bit);

    v[2] = _mm_mullo_epi32(u[2], cospi48);
    x = _mm_mullo_epi32(u[3], cospi16);
    v[2] = _mm_add_epi32(v[2], x);
    v[2] = _mm_add_epi32(v[2], rnding);
    v[2] = _mm_srai_epi32(v[2], bit);

    v[3] = _mm_mullo_epi32(u[2], cospi16);
    x = _mm_mullo_epi32(u[3], cospi48);
    v[3] = _mm_sub_epi32(x, v[3]);
    v[3] = _mm_add_epi32(v[3], rnding);
    v[3] = _mm_srai_epi32(v[3], bit);

    v[4] = _mm_add_epi32(u[4], u[5]);
    v[5] = _mm_sub_epi32(u[4], u[5]);
    v[6] = _mm_sub_epi32(u[7], u[6]);
    v[7] = _mm_add_epi32(u[7], u[6]);
    v[8] = u[8];

    v[9] = _mm_mullo_epi32(u[9], cospim16);
    x = _mm_mullo_epi32(u[14], cospi48);
    v[9] = _mm_add_epi32(v[9], x);
    v[9] = _mm_add_epi32(v[9], rnding);
    v[9] = _mm_srai_epi32(v[9], bit);

    v[14] = _mm_mullo_epi32(u[9], cospi48);
    x = _mm_mullo_epi32(u[14], cospim16);
    v[14] = _mm_sub_epi32(v[14], x);
    v[14] = _mm_add_epi32(v[14], rnding);
    v[14] = _mm_srai_epi32(v[14], bit);

    v[10] = _mm_mullo_epi32(u[10], cospim48);
    x = _mm_mullo_epi32(u[13], cospim16);
    v[10] = _mm_add_epi32(v[10], x);
    v[10] = _mm_add_epi32(v[10], rnding);
    v[10] = _mm_srai_epi32(v[10], bit);

    v[13] = _mm_mullo_epi32(u[10], cospim16);
    x = _mm_mullo_epi32(u[13], cospim48);
    v[13] = _mm_sub_epi32(v[13], x);
    v[13] = _mm_add_epi32(v[13], rnding);
    v[13] = _mm_srai_epi32(v[13], bit);

    v[11] = u[11];
    v[12] = u[12];
    v[15] = u[15];

    // stage 5
    u[0] = v[0];
    u[1] = v[1];
    u[2] = v[2];
    u[3] = v[3];

    u[4] = _mm_mullo_epi32(v[4], cospi56);
    x = _mm_mullo_epi32(v[7], cospi8);
    u[4] = _mm_add_epi32(u[4], x);
    u[4] = _mm_add_epi32(u[4], rnding);
    u[4] = _mm_srai_epi32(u[4], bit);

    u[7] = _mm_mullo_epi32(v[4], cospi8);
    x = _mm_mullo_epi32(v[7], cospi56);
    u[7] = _mm_sub_epi32(x, u[7]);
    u[7] = _mm_add_epi32(u[7], rnding);
    u[7] = _mm_srai_epi32(u[7], bit);

    u[5] = _mm_mullo_epi32(v[5], cospi24);
    x = _mm_mullo_epi32(v[6], cospi40);
    u[5] = _mm_add_epi32(u[5], x);
    u[5] = _mm_add_epi32(u[5], rnding);
    u[5] = _mm_srai_epi32(u[5], bit);

    u[6] = _mm_mullo_epi32(v[5], cospi40);
    x = _mm_mullo_epi32(v[6], cospi24);
    u[6] = _mm_sub_epi32(x, u[6]);
    u[6] = _mm_add_epi32(u[6], rnding);
    u[6] = _mm_srai_epi32(u[6], bit);

    u[8] = _mm_add_epi32(v[8], v[9]);
    u[9] = _mm_sub_epi32(v[8], v[9]);
    u[10] = _mm_sub_epi32(v[11], v[10]);
    u[11] = _mm_add_epi32(v[11], v[10]);
    u[12] = _mm_add_epi32(v[12], v[13]);
    u[13] = _mm_sub_epi32(v[12], v[13]);
    u[14] = _mm_sub_epi32(v[15], v[14]);
    u[15] = _mm_add_epi32(v[15], v[14]);

    // stage 6
    v[0] = u[0];
    v[1] = u[1];
    v[2] = u[2];
    v[3] = u[3];
    v[4] = u[4];
    v[5] = u[5];
    v[6] = u[6];
    v[7] = u[7];

    v[8] = _mm_mullo_epi32(u[8], cospi60);
    x = _mm_mullo_epi32(u[15], cospi4);
    v[8] = _mm_add_epi32(v[8], x);
    v[8] = _mm_add_epi32(v[8], rnding);
    v[8] = _mm_srai_epi32(v[8], bit);

    v[15] = _mm_mullo_epi32(u[8], cospi4);
    x = _mm_mullo_epi32(u[15], cospi60);
    v[15] = _mm_sub_epi32(x, v[15]);
    v[15] = _mm_add_epi32(v[15], rnding);
    v[15] = _mm_srai_epi32(v[15], bit);

    v[9] = _mm_mullo_epi32(u[9], cospi28);
    x = _mm_mullo_epi32(u[14], cospi36);
    v[9] = _mm_add_epi32(v[9], x);
    v[9] = _mm_add_epi32(v[9], rnding);
    v[9] = _mm_srai_epi32(v[9], bit);

    v[14] = _mm_mullo_epi32(u[9], cospi36);
    x = _mm_mullo_epi32(u[14], cospi28);
    v[14] = _mm_sub_epi32(x, v[14]);
    v[14] = _mm_add_epi32(v[14], rnding);
    v[14] = _mm_srai_epi32(v[14], bit);

    v[10] = _mm_mullo_epi32(u[10], cospi44);
    x = _mm_mullo_epi32(u[13], cospi20);
    v[10] = _mm_add_epi32(v[10], x);
    v[10] = _mm_add_epi32(v[10], rnding);
    v[10] = _mm_srai_epi32(v[10], bit);

    v[13] = _mm_mullo_epi32(u[10], cospi20);
    x = _mm_mullo_epi32(u[13], cospi44);
    v[13] = _mm_sub_epi32(x, v[13]);
    v[13] = _mm_add_epi32(v[13], rnding);
    v[13] = _mm_srai_epi32(v[13], bit);

    v[11] = _mm_mullo_epi32(u[11], cospi12);
    x = _mm_mullo_epi32(u[12], cospi52);
    v[11] = _mm_add_epi32(v[11], x);
    v[11] = _mm_add_epi32(v[11], rnding);
    v[11] = _mm_srai_epi32(v[11], bit);

    v[12] = _mm_mullo_epi32(u[11], cospi52);
    x = _mm_mullo_epi32(u[12], cospi12);
    v[12] = _mm_sub_epi32(x, v[12]);
    v[12] = _mm_add_epi32(v[12], rnding);
    v[12] = _mm_srai_epi32(v[12], bit);

    out[0 * col_num + col] = v[0];
    out[1 * col_num + col] = v[8];
    out[2 * col_num + col] = v[4];
    out[3 * col_num + col] = v[12];
    out[4 * col_num + col] = v[2];
    out[5 * col_num + col] = v[10];
    out[6 * col_num + col] = v[6];
    out[7 * col_num + col] = v[14];
    out[8 * col_num + col] = v[1];
    out[9 * col_num + col] = v[9];
    out[10 * col_num + col] = v[5];
    out[11 * col_num + col] = v[13];
    out[12 * col_num + col] = v[3];
    out[13 * col_num + col] = v[11];
    out[14 * col_num + col] = v[7];
    out[15 * col_num + col] = v[15];
  }
}

static void fadst16x16_sse4_1(__m128i *in, __m128i *out, int bit,
                              const int num_cols) {
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
  const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
  const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
  const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
  const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
  const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
  const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
  const __m128i cospim2 = _mm_set1_epi32(-cospi[2]);
  const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
  const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
  const __m128i cospim10 = _mm_set1_epi32(-cospi[10]);
  const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
  const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
  const __m128i cospim18 = _mm_set1_epi32(-cospi[18]);
  const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
  const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
  const __m128i cospim26 = _mm_set1_epi32(-cospi[26]);
  const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
  const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
  const __m128i cospim34 = _mm_set1_epi32(-cospi[34]);
  const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
  const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
  const __m128i cospim42 = _mm_set1_epi32(-cospi[42]);
  const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
  const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
  const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
  const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
  const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
  const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
  const __m128i zero = _mm_setzero_si128();

  __m128i u[16], v[16], x, y;
  int col;

  for (col = 0; col < num_cols; ++col) {
    // stage 0
    // stage 1
    u[0] = in[0 * num_cols + col];
    u[1] = _mm_sub_epi32(zero, in[15 * num_cols + col]);
    u[2] = _mm_sub_epi32(zero, in[7 * num_cols + col]);
    u[3] = in[8 * num_cols + col];
    u[4] = _mm_sub_epi32(zero, in[3 * num_cols + col]);
    u[5] = in[12 * num_cols + col];
    u[6] = in[4 * num_cols + col];
    u[7] = _mm_sub_epi32(zero, in[11 * num_cols + col]);
    u[8] = _mm_sub_epi32(zero, in[1 * num_cols + col]);
    u[9] = in[14 * num_cols + col];
    u[10] = in[6 * num_cols + col];
    u[11] = _mm_sub_epi32(zero, in[9 * num_cols + col]);
    u[12] = in[2 * num_cols + col];
    u[13] = _mm_sub_epi32(zero, in[13 * num_cols + col]);
    u[14] = _mm_sub_epi32(zero, in[5 * num_cols + col]);
    u[15] = in[10 * num_cols + col];

    // stage 2
    v[0] = u[0];
    v[1] = u[1];

    x = _mm_mullo_epi32(u[2], cospi32);
    y = _mm_mullo_epi32(u[3], cospi32);
    v[2] = _mm_add_epi32(x, y);
    v[2] = _mm_add_epi32(v[2], rnding);
    v[2] = _mm_srai_epi32(v[2], bit);

    v[3] = _mm_sub_epi32(x, y);
    v[3] = _mm_add_epi32(v[3], rnding);
    v[3] = _mm_srai_epi32(v[3], bit);

    v[4] = u[4];
    v[5] = u[5];

    x = _mm_mullo_epi32(u[6], cospi32);
    y = _mm_mullo_epi32(u[7], cospi32);
    v[6] = _mm_add_epi32(x, y);
    v[6] = _mm_add_epi32(v[6], rnding);
    v[6] = _mm_srai_epi32(v[6], bit);

    v[7] = _mm_sub_epi32(x, y);
    v[7] = _mm_add_epi32(v[7], rnding);
    v[7] = _mm_srai_epi32(v[7], bit);

    v[8] = u[8];
    v[9] = u[9];

    x = _mm_mullo_epi32(u[10], cospi32);
    y = _mm_mullo_epi32(u[11], cospi32);
    v[10] = _mm_add_epi32(x, y);
    v[10] = _mm_add_epi32(v[10], rnding);
    v[10] = _mm_srai_epi32(v[10], bit);

    v[11] = _mm_sub_epi32(x, y);
    v[11] = _mm_add_epi32(v[11], rnding);
    v[11] = _mm_srai_epi32(v[11], bit);

    v[12] = u[12];
    v[13] = u[13];

    x = _mm_mullo_epi32(u[14], cospi32);
    y = _mm_mullo_epi32(u[15], cospi32);
    v[14] = _mm_add_epi32(x, y);
    v[14] = _mm_add_epi32(v[14], rnding);
    v[14] = _mm_srai_epi32(v[14], bit);

    v[15] = _mm_sub_epi32(x, y);
    v[15] = _mm_add_epi32(v[15], rnding);
    v[15] = _mm_srai_epi32(v[15], bit);

    // stage 3
    u[0] = _mm_add_epi32(v[0], v[2]);
    u[1] = _mm_add_epi32(v[1], v[3]);
    u[2] = _mm_sub_epi32(v[0], v[2]);
    u[3] = _mm_sub_epi32(v[1], v[3]);
    u[4] = _mm_add_epi32(v[4], v[6]);
    u[5] = _mm_add_epi32(v[5], v[7]);
    u[6] = _mm_sub_epi32(v[4], v[6]);
    u[7] = _mm_sub_epi32(v[5], v[7]);
    u[8] = _mm_add_epi32(v[8], v[10]);
    u[9] = _mm_add_epi32(v[9], v[11]);
    u[10] = _mm_sub_epi32(v[8], v[10]);
    u[11] = _mm_sub_epi32(v[9], v[11]);
    u[12] = _mm_add_epi32(v[12], v[14]);
    u[13] = _mm_add_epi32(v[13], v[15]);
    u[14] = _mm_sub_epi32(v[12], v[14]);
    u[15] = _mm_sub_epi32(v[13], v[15]);

    // stage 4
    v[0] = u[0];
    v[1] = u[1];
    v[2] = u[2];
    v[3] = u[3];
    v[4] = half_btf_sse4_1(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
    v[5] = half_btf_sse4_1(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
    v[6] = half_btf_sse4_1(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
    v[7] = half_btf_sse4_1(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
    v[8] = u[8];
    v[9] = u[9];
    v[10] = u[10];
    v[11] = u[11];
    v[12] = half_btf_sse4_1(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
    v[13] = half_btf_sse4_1(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
    v[14] = half_btf_sse4_1(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
    v[15] = half_btf_sse4_1(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);

    // stage 5
    u[0] = _mm_add_epi32(v[0], v[4]);
    u[1] = _mm_add_epi32(v[1], v[5]);
    u[2] = _mm_add_epi32(v[2], v[6]);
    u[3] = _mm_add_epi32(v[3], v[7]);
    u[4] = _mm_sub_epi32(v[0], v[4]);
    u[5] = _mm_sub_epi32(v[1], v[5]);
    u[6] = _mm_sub_epi32(v[2], v[6]);
    u[7] = _mm_sub_epi32(v[3], v[7]);
    u[8] = _mm_add_epi32(v[8], v[12]);
    u[9] = _mm_add_epi32(v[9], v[13]);
    u[10] = _mm_add_epi32(v[10], v[14]);
    u[11] = _mm_add_epi32(v[11], v[15]);
    u[12] = _mm_sub_epi32(v[8], v[12]);
    u[13] = _mm_sub_epi32(v[9], v[13]);
    u[14] = _mm_sub_epi32(v[10], v[14]);
    u[15] = _mm_sub_epi32(v[11], v[15]);

    // stage 6
    v[0] = u[0];
    v[1] = u[1];
    v[2] = u[2];
    v[3] = u[3];
    v[4] = u[4];
    v[5] = u[5];
    v[6] = u[6];
    v[7] = u[7];
    v[8] = half_btf_sse4_1(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
    v[9] = half_btf_sse4_1(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
    v[10] = half_btf_sse4_1(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
    v[11] = half_btf_sse4_1(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
    v[12] = half_btf_sse4_1(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
    v[13] = half_btf_sse4_1(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
    v[14] = half_btf_sse4_1(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
    v[15] = half_btf_sse4_1(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);

    // stage 7
    u[0] = _mm_add_epi32(v[0], v[8]);
    u[1] = _mm_add_epi32(v[1], v[9]);
    u[2] = _mm_add_epi32(v[2], v[10]);
    u[3] = _mm_add_epi32(v[3], v[11]);
    u[4] = _mm_add_epi32(v[4], v[12]);
    u[5] = _mm_add_epi32(v[5], v[13]);
    u[6] = _mm_add_epi32(v[6], v[14]);
    u[7] = _mm_add_epi32(v[7], v[15]);
    u[8] = _mm_sub_epi32(v[0], v[8]);
    u[9] = _mm_sub_epi32(v[1], v[9]);
    u[10] = _mm_sub_epi32(v[2], v[10]);
    u[11] = _mm_sub_epi32(v[3], v[11]);
    u[12] = _mm_sub_epi32(v[4], v[12]);
    u[13] = _mm_sub_epi32(v[5], v[13]);
    u[14] = _mm_sub_epi32(v[6], v[14]);
    u[15] = _mm_sub_epi32(v[7], v[15]);

    // stage 8
    v[0] = half_btf_sse4_1(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
    v[1] = half_btf_sse4_1(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
    v[2] = half_btf_sse4_1(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
    v[3] = half_btf_sse4_1(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
    v[4] = half_btf_sse4_1(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
    v[5] = half_btf_sse4_1(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
    v[6] = half_btf_sse4_1(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
    v[7] = half_btf_sse4_1(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
    v[8] = half_btf_sse4_1(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
    v[9] = half_btf_sse4_1(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
    v[10] = half_btf_sse4_1(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
    v[11] = half_btf_sse4_1(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
    v[12] = half_btf_sse4_1(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
    v[13] = half_btf_sse4_1(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
    v[14] = half_btf_sse4_1(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
    v[15] = half_btf_sse4_1(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);

    // stage 9
    out[0 * num_cols + col] = v[1];
    out[1 * num_cols + col] = v[14];
    out[2 * num_cols + col] = v[3];
    out[3 * num_cols + col] = v[12];
    out[4 * num_cols + col] = v[5];
    out[5 * num_cols + col] = v[10];
    out[6 * num_cols + col] = v[7];
    out[7 * num_cols + col] = v[8];
    out[8 * num_cols + col] = v[9];
    out[9 * num_cols + col] = v[6];
    out[10 * num_cols + col] = v[11];
    out[11 * num_cols + col] = v[4];
    out[12 * num_cols + col] = v[13];
    out[13 * num_cols + col] = v[2];
    out[14 * num_cols + col] = v[15];
    out[15 * num_cols + col] = v[0];
  }
}

static void col_txfm_16x16_rounding(__m128i *in, int shift) {
  // Note:
  // We split the 16x16 rounding into 4 sections of 8x8 rounding,
  // instead of rounding 4 columns at a time.
  col_txfm_8x8_rounding(&in[0], shift);
  col_txfm_8x8_rounding(&in[16], shift);
  col_txfm_8x8_rounding(&in[32], shift);
  col_txfm_8x8_rounding(&in[48], shift);
}

static void col_txfm_8x16_rounding(__m128i *in, int shift) {
  col_txfm_8x8_rounding(&in[0], shift);
  col_txfm_8x8_rounding(&in[16], shift);
}

static void write_buffer_16x16(const __m128i *in, int32_t *output) {
  const int size_8x8 = 16 * 4;
  write_buffer_8x8(&in[0], output);
  output += size_8x8;
  write_buffer_8x8(&in[16], output);
  output += size_8x8;
  write_buffer_8x8(&in[32], output);
  output += size_8x8;
  write_buffer_8x8(&in[48], output);
}

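// The 2-D 16x16 pipeline mirrors the 8x8 one, with col_num = 4 register
// columns per row.
//
// A minimal usage sketch (hypothetical buffers, not part of this file):
//
//   int16_t src[16 * 16];  // residual block, row-major, stride 16
//   int32_t dst[16 * 16];  // output transform coefficients
//   av1_fwd_txfm2d_16x16_sse4_1(src, dst, 16, DCT_DCT, 10);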
void av1_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
                                 int stride, TX_TYPE tx_type, int bd) {
  __m128i in[64], out[64];
  const int8_t *shift = fwd_txfm_shift_ls[TX_16X16];
  const int txw_idx = get_txw_idx(TX_16X16);
  const int txh_idx = get_txh_idx(TX_16X16);
  const int col_num = 4;
  switch (tx_type) {
    case DCT_DCT:
      load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case ADST_DCT:
      load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case DCT_ADST:
      load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case ADST_ADST:
      load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case FLIPADST_DCT:
      load_buffer_16x16(input, in, stride, 1, 0, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case DCT_FLIPADST:
      load_buffer_16x16(input, in, stride, 0, 1, shift[0]);
      fdct16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case FLIPADST_FLIPADST:
      load_buffer_16x16(input, in, stride, 1, 1, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case ADST_FLIPADST:
      load_buffer_16x16(input, in, stride, 0, 1, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
      col_txfm_16x16_rounding(out, -shift[1]);
      transpose_16x16(out, in);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
      transpose_16x16(out, in);
      write_buffer_16x16(in, coeff);
      break;
    case FLIPADST_ADST:
      load_buffer_16x16(input, in, stride, 1, 0, shift[0]);
      fadst16x16_sse4_1(in, out, fwd_cos_bit_col[txw_idx][txh_idx], col_num);
1631 col_txfm_16x16_rounding(out, -shift[1]);
1632 transpose_16x16(out, in);
1633 fadst16x16_sse4_1(in, out, fwd_cos_bit_row[txw_idx][txh_idx], col_num);
1634 transpose_16x16(out, in);
1635 write_buffer_16x16(in, coeff);
1636 break;
1637 default: assert(0);
1638 }
1639 (void)bd;
1640 }
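
// Outline of each case above (DCT_DCT shown; the others differ only in the
// 1-D kernels and the flip flags passed to load_buffer_16x16):
//   1. load_buffer_16x16: upshift the source residue by shift[0].
//   2. Column transform at fwd_cos_bit_col precision.
//   3. col_txfm_16x16_rounding: round-shift by -shift[1].
//   4. Transpose, row transform at fwd_cos_bit_row, transpose back.
//   5. write_buffer_16x16: store 256 int32_t coefficients.
// The shift tables define shift[2] as 0 for TX_16X16, so no trailing
// row-side shift appears here, unlike the rectangular 16x8/8x16 kernels
// below.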

// Reverse the register buffer for the left/right-flipped case. Note the
// hard-coded 30: this routine assumes size == 32, as in the 16x8 kernel
// below.
static INLINE void flip_buf_sse4_1(__m128i *in, __m128i *out, int size) {
  for (int i = 0; i < size; i += 2) in[30 - i] = out[i];
  for (int i = 1; i < size; i += 2) in[size - i] = out[i];
}

static const fwd_transform_1d_sse4_1 col_highbd_txfm8x8_arr[TX_TYPES] = {
  fdct8x8_sse4_1,   // DCT_DCT
  fadst8x8_sse4_1,  // ADST_DCT
  fdct8x8_sse4_1,   // DCT_ADST
  fadst8x8_sse4_1,  // ADST_ADST
  fadst8x8_sse4_1,  // FLIPADST_DCT
  fdct8x8_sse4_1,   // DCT_FLIPADST
  fadst8x8_sse4_1,  // FLIPADST_FLIPADST
  fadst8x8_sse4_1,  // ADST_FLIPADST
  fadst8x8_sse4_1,  // FLIPADST_ADST
  NULL,             // IDTX
  NULL,             // V_DCT
  NULL,             // H_DCT
  NULL,             // V_ADST
  NULL,             // H_ADST
  NULL,             // V_FLIPADST
  NULL              // H_FLIPADST
};

static const fwd_transform_1d_sse4_1 row_highbd_txfm8x16_arr[TX_TYPES] = {
  fdct16x16_sse4_1,   // DCT_DCT
  fdct16x16_sse4_1,   // ADST_DCT
  fadst16x16_sse4_1,  // DCT_ADST
  fadst16x16_sse4_1,  // ADST_ADST
  fdct16x16_sse4_1,   // FLIPADST_DCT
  fadst16x16_sse4_1,  // DCT_FLIPADST
  fadst16x16_sse4_1,  // FLIPADST_FLIPADST
  fadst16x16_sse4_1,  // ADST_FLIPADST
  fadst16x16_sse4_1,  // FLIPADST_ADST
  NULL,               // IDTX
  NULL,               // V_DCT
  NULL,               // H_DCT
  NULL,               // V_ADST
  NULL,               // H_ADST
  NULL,               // V_FLIPADST
  NULL                // H_FLIPADST
};

static const fwd_transform_1d_sse4_1 col_highbd_txfm8x16_arr[TX_TYPES] = {
  fdct16x16_sse4_1,   // DCT_DCT
  fadst16x16_sse4_1,  // ADST_DCT
  fdct16x16_sse4_1,   // DCT_ADST
  fadst16x16_sse4_1,  // ADST_ADST
  fadst16x16_sse4_1,  // FLIPADST_DCT
  fdct16x16_sse4_1,   // DCT_FLIPADST
  fadst16x16_sse4_1,  // FLIPADST_FLIPADST
  fadst16x16_sse4_1,  // ADST_FLIPADST
  fadst16x16_sse4_1,  // FLIPADST_ADST
  NULL,               // IDTX
  NULL,               // V_DCT
  NULL,               // H_DCT
  NULL,               // V_ADST
  NULL,               // H_ADST
  NULL,               // V_FLIPADST
  NULL                // H_FLIPADST
};
static const fwd_transform_1d_sse4_1 row_highbd_txfm8x8_arr[TX_TYPES] = {
  fdct8x8_sse4_1,   // DCT_DCT
  fdct8x8_sse4_1,   // ADST_DCT
  fadst8x8_sse4_1,  // DCT_ADST
  fadst8x8_sse4_1,  // ADST_ADST
  fdct8x8_sse4_1,   // FLIPADST_DCT
  fadst8x8_sse4_1,  // DCT_FLIPADST
  fadst8x8_sse4_1,  // FLIPADST_FLIPADST
  fadst8x8_sse4_1,  // ADST_FLIPADST
  fadst8x8_sse4_1,  // FLIPADST_ADST
  NULL,             // IDTX
  NULL,             // V_DCT
  NULL,             // H_DCT
  NULL,             // V_ADST
  NULL,             // H_ADST
  NULL,             // V_FLIPADST
  NULL              // H_FLIPADST
};
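
// The 2-D kernels below dispatch their 1-D column/row passes through these
// tables, e.g. for a 16x8 block:
//   const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x8_arr[tx_type];
//   const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x16_arr[tx_type];
// NULL entries (IDTX and the V_*/H_* types) have no SSE4.1 implementation
// here, so those tx_types must not reach these kernels.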

void av1_fwd_txfm2d_16x8_sse4_1(const int16_t *input, int32_t *coeff,
                                int stride, TX_TYPE tx_type, int bd) {
  __m128i in[32], out[32];
  const int8_t *shift = fwd_txfm_shift_ls[TX_16X8];
  const int txw_idx = get_txw_idx(TX_16X8);
  const int txh_idx = get_txh_idx(TX_16X8);
  const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x8_arr[tx_type];
  const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x16_arr[tx_type];
  int bit = fwd_cos_bit_col[txw_idx][txh_idx];
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  for (int i = 0; i < 2; i++) {
    load_buffer_8x8(input + i * 8, in, stride, ud_flip, 0, shift[0]);
    col_txfm(in, in, bit, 0);
    col_txfm_8x8_rounding(in, -shift[1]);
    transpose_8x8(in, out + i * 16);
  }

  if (lr_flip) {
    flip_buf_sse4_1(in, out, 32);
    row_txfm(in, out, bit, 2);
  } else {
    row_txfm(out, out, bit, 2);
  }

  for (int i = 0; i < 2; i++) {
    transpose_8x8(out + i * 16, in);
    av1_round_shift_rect_array_32_sse4_1(in, in, 16, -shift[2], NewSqrt2);
    write_buffer_16x8(in, coeff + i * 8, 16);
  }

  (void)bd;
}
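
// av1_round_shift_rect_array_32_sse4_1 above applies the rectangular-
// transform compensation: 2:1 block sizes carry an extra sqrt(2) scale,
// approximated as NewSqrt2 / 2^NewSqrt2Bits (5793 / 4096). Per-coefficient
// scalar sketch, assuming the helper follows the generic
// av1_round_shift_rect_array_32 behavior:
//   r = round_shift(in, -shift[2]);
//   out = round_shift(r * NewSqrt2, NewSqrt2Bits);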

void av1_fwd_txfm2d_8x16_sse4_1(const int16_t *input, int32_t *coeff,
                                int stride, TX_TYPE tx_type, int bd) {
  __m128i in[32], out[32];
  const int8_t *shift = fwd_txfm_shift_ls[TX_8X16];
  const int txw_idx = get_txw_idx(TX_8X16);
  const int txh_idx = get_txh_idx(TX_8X16);
  const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x16_arr[tx_type];
  const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x8_arr[tx_type];
  int bit = fwd_cos_bit_col[txw_idx][txh_idx];
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  load_buffer_8x16(input, in, stride, ud_flip, lr_flip, shift[0]);
  col_txfm(in, in, bit, 2);
  col_txfm_8x16_rounding(in, -shift[1]);
  transpose_8x8(in, out);
  transpose_8x8(in + 16, out + 16);

  for (int i = 0; i < 2; i++) {
    row_txfm(out + i * 16, out, bit, 0);
    transpose_8x8(out, in);
    av1_round_shift_rect_array_32_sse4_1(in, in, 16, -shift[2], NewSqrt2);
    write_buffer_8x8(in, coeff + i * 64);
  }

  (void)bd;
}

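// The 8x16 kernel above mirrors the 16x8 one with the roles swapped: a
// single 8x16 column pass followed by two 8x8 row passes, applying the same
// sqrt(2) rectangular compensation together with -shift[2].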