/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
#define VPX_DSP_X86_INV_TXFM_SSE2_H_

#include <emmintrin.h>  // SSE2
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
// Transposes an 8x8 block of 16-bit elements from in[0..7] into res[0..7].
// All reads happen before any write, so in and res may alias.
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
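  // Stage 1: interleave the rows pairwise at 16-bit granularity.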
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);

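  // Stage 2: interleave the stage-1 results at 32-bit granularity.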
  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);

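  // Stage 3: interleave at 64-bit granularity to form the output rows.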
  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
}

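// Transposes the 4x4 block held in the low halves of in0..in3, writing rows
// 0/1 of the result to in0 and rows 2/3 to in1 (out0 and out1 are unused).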
#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
  {                                                     \
    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
                                                        \
    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
  }

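// Transposes the 8x4 block formed by the low halves of in[0..7] into four
// full output rows out[0..3].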
static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);

  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);

  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4);
  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4);
  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6);
  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6);
}

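// Transposes a 16x16 block stored as two 16x8 halves: res0 holds the left
// eight columns of each row and res1 the right eight. Each 8x8 quadrant is
// transposed and the off-diagonal quadrants swap places; tbuf preserves the
// transposed top-right quadrant until res1 has been overwritten, then it is
// copied into the bottom half of res0.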
static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
  __m128i tbuf[8];
  array_transpose_8x8(res0, res0);
  array_transpose_8x8(res1, tbuf);
  array_transpose_8x8(res0 + 8, res1);
  array_transpose_8x8(res1 + 8, res1 + 8);

  res0[8] = tbuf[0];
  res0[9] = tbuf[1];
  res0[10] = tbuf[2];
  res0[11] = tbuf[3];
  res0[12] = tbuf[4];
  res0[13] = tbuf[5];
  res0[14] = tbuf[6];
  res0[15] = tbuf[7];
}

// Loads eight coefficients as 16-bit values so that the 8 bit optimisations
// can be used for profile 0 even when high bit depth is enabled.
static INLINE __m128i load_input_data(const tran_low_t *data) {
#if CONFIG_VP9_HIGHBITDEPTH
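  // With CONFIG_VP9_HIGHBITDEPTH, tran_low_t is 32 bits wide, so the eight
  // coefficients must be narrowed to 16 bits individually.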
  return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
                        data[6], data[7]);
#else
  return _mm_load_si128((const __m128i *)data);
#endif
}

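// Loads eight coefficients from each of the sixteen rows of a 16-wide
// coefficient block, i.e. one 8x16 half of a 16x16 block.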
static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
  in[0]  = load_input_data(input + 0 * 16);
  in[1]  = load_input_data(input + 1 * 16);
  in[2]  = load_input_data(input + 2 * 16);
  in[3]  = load_input_data(input + 3 * 16);
  in[4]  = load_input_data(input + 4 * 16);
  in[5]  = load_input_data(input + 5 * 16);
  in[6]  = load_input_data(input + 6 * 16);
  in[7]  = load_input_data(input + 7 * 16);

  in[8]  = load_input_data(input + 8 * 16);
  in[9]  = load_input_data(input + 9 * 16);
  in[10] = load_input_data(input + 10 * 16);
  in[11] = load_input_data(input + 11 * 16);
  in[12] = load_input_data(input + 12 * 16);
  in[13] = load_input_data(input + 13 * 16);
  in[14] = load_input_data(input + 14 * 16);
  in[15] = load_input_data(input + 15 * 16);
}

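// Adds the eight 16-bit residuals in in_x to eight pixels at dest with
// unsigned saturation and stores them back. A 'zero' register holding
// _mm_setzero_si128() must be in scope at the point of use.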
#define RECON_AND_STORE(dest, in_x)                  \
  {                                                  \
    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
    d0 = _mm_unpacklo_epi8(d0, zero);                \
    d0 = _mm_add_epi16(in_x, d0);                    \
    d0 = _mm_packus_epi16(d0, d0);                   \
    _mm_storel_epi64((__m128i *)(dest), d0);         \
  }

static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  const __m128i zero = _mm_setzero_si128();
  // Final rounding and shift: add 32, then arithmetic-shift right by 6,
  // i.e. divide by 64 with rounding.
  in[0] = _mm_adds_epi16(in[0], final_rounding);
  in[1] = _mm_adds_epi16(in[1], final_rounding);
  in[2] = _mm_adds_epi16(in[2], final_rounding);
  in[3] = _mm_adds_epi16(in[3], final_rounding);
  in[4] = _mm_adds_epi16(in[4], final_rounding);
  in[5] = _mm_adds_epi16(in[5], final_rounding);
  in[6] = _mm_adds_epi16(in[6], final_rounding);
  in[7] = _mm_adds_epi16(in[7], final_rounding);
  in[8] = _mm_adds_epi16(in[8], final_rounding);
  in[9] = _mm_adds_epi16(in[9], final_rounding);
  in[10] = _mm_adds_epi16(in[10], final_rounding);
  in[11] = _mm_adds_epi16(in[11], final_rounding);
  in[12] = _mm_adds_epi16(in[12], final_rounding);
  in[13] = _mm_adds_epi16(in[13], final_rounding);
  in[14] = _mm_adds_epi16(in[14], final_rounding);
  in[15] = _mm_adds_epi16(in[15], final_rounding);

  in[0] = _mm_srai_epi16(in[0], 6);
  in[1] = _mm_srai_epi16(in[1], 6);
  in[2] = _mm_srai_epi16(in[2], 6);
  in[3] = _mm_srai_epi16(in[3], 6);
  in[4] = _mm_srai_epi16(in[4], 6);
  in[5] = _mm_srai_epi16(in[5], 6);
  in[6] = _mm_srai_epi16(in[6], 6);
  in[7] = _mm_srai_epi16(in[7], 6);
  in[8] = _mm_srai_epi16(in[8], 6);
  in[9] = _mm_srai_epi16(in[9], 6);
  in[10] = _mm_srai_epi16(in[10], 6);
  in[11] = _mm_srai_epi16(in[11], 6);
  in[12] = _mm_srai_epi16(in[12], 6);
  in[13] = _mm_srai_epi16(in[13], 6);
  in[14] = _mm_srai_epi16(in[14], 6);
  in[15] = _mm_srai_epi16(in[15], 6);

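  // Add each row to the prediction in dest and store the result.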
  RECON_AND_STORE(dest +  0 * stride, in[0]);
  RECON_AND_STORE(dest +  1 * stride, in[1]);
  RECON_AND_STORE(dest +  2 * stride, in[2]);
  RECON_AND_STORE(dest +  3 * stride, in[3]);
  RECON_AND_STORE(dest +  4 * stride, in[4]);
  RECON_AND_STORE(dest +  5 * stride, in[5]);
  RECON_AND_STORE(dest +  6 * stride, in[6]);
  RECON_AND_STORE(dest +  7 * stride, in[7]);
  RECON_AND_STORE(dest +  8 * stride, in[8]);
  RECON_AND_STORE(dest +  9 * stride, in[9]);
  RECON_AND_STORE(dest + 10 * stride, in[10]);
  RECON_AND_STORE(dest + 11 * stride, in[11]);
  RECON_AND_STORE(dest + 12 * stride, in[12]);
  RECON_AND_STORE(dest + 13 * stride, in[13]);
  RECON_AND_STORE(dest + 14 * stride, in[14]);
  RECON_AND_STORE(dest + 15 * stride, in[15]);
}

void idct4_sse2(__m128i *in);
void idct8_sse2(__m128i *in);
void idct16_sse2(__m128i *in0, __m128i *in1);
void iadst4_sse2(__m128i *in);
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);
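
// Illustrative sketch of how these helpers typically compose (an assumed
// ordering, not this header's contract; the real callers live in
// inv_txfm_sse2.c and may interleave these steps differently):
//   load_buffer_8x16(input, in0);          // left 8 columns of 16x16 block
//   load_buffer_8x16(input + 8, in1);      // right 8 columns
//   idct16_sse2(in0, in1);                 // first 1-D transform pass
//   array_transpose_16x16(in0, in1);       // swap rows and columns
//   idct16_sse2(in0, in1);                 // second pass
//   write_buffer_8x16(dest, in0, stride);  // round, shift, reconstruct
//   write_buffer_8x16(dest + 8, in1, stride);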

#endif  // VPX_DSP_X86_INV_TXFM_SSE2_H_