/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at https://www.aomedia.org/license/software-license. If the
 * Alliance for Open Media Patent License 1.0 was not distributed with this
 * source code in the PATENTS file, you can obtain it at
 * https://www.aomedia.org/license/patent-license.
 */

#include <immintrin.h>
#include <assert.h>

#include "EbDefinitions.h"
#include "common_dsp_rtcd.h"

#include "convolve_avx2.h"
#include "synonyms.h"
#include "convolve.h"
21
void svt_av1_highbd_convolve_2d_sr_avx2(const uint16_t *src, int32_t src_stride, uint16_t *dst,
                                        int32_t dst_stride, int32_t w, int32_t h,
                                        const InterpFilterParams *filter_params_x,
                                        const InterpFilterParams *filter_params_y,
                                        const int32_t subpel_x_q4, const int32_t subpel_y_q4,
                                        ConvolveParams *conv_params, int32_t bd) {
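    // 2D separable filter: a horizontal pass writes 16-bit intermediates into
    // im_block (fixed 8-column stride), then a vertical pass consumes them.
    // The block is processed in strips of 8 columns at a time.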
    DECLARE_ALIGNED(32, int16_t, im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * 8]);
    int32_t im_h      = h + filter_params_y->taps - 1;
    int32_t im_stride = 8;
    int32_t i, j;
    const int32_t fo_vert  = filter_params_y->taps / 2 - 1;
    const int32_t fo_horiz = filter_params_x->taps / 2 - 1;
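    // Step back to the top-left source sample covered by the filter window.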
    const uint16_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

    // Check that, even with 12-bit input, the intermediate values will fit
    // into an unsigned 16-bit intermediate array.
    assert(bd + FILTER_BITS + 2 - conv_params->round_0 <= 16);

    __m256i s[8], coeffs_y[4], coeffs_x[4];

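    // The horizontal round constant also adds a (1 << (bd + FILTER_BITS - 1))
    // offset so the intermediate values stay non-negative.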
    const __m256i round_const_x = _mm256_set1_epi32(((1 << conv_params->round_0) >> 1) +
                                                    (1 << (bd + FILTER_BITS - 1)));
    const __m128i round_shift_x = _mm_cvtsi32_si128(conv_params->round_0);

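    // The vertical round constant subtracts that offset back out (scaled by the
    // vertical filter gain and the first rounding shift) while rounding.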
    const __m256i round_const_y = _mm256_set1_epi32(
        ((1 << conv_params->round_1) >> 1) -
        (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)));
    const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);

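    // 'bits' is the residual filter scale left after the two rounding shifts;
    // the final result is rounded by it and clamped to [0, (1 << bd) - 1].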
    const int32_t bits             = FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;
    const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
    const __m256i round_const_bits = _mm256_set1_epi32((1 << bits) >> 1);
    const __m256i clp_pxl          = _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
    const __m256i zero             = _mm256_setzero_si256();

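    // Expand the two 8-tap kernels into the pairwise coefficient layout
    // consumed by convolve16_8tap_avx2 (_mm256_madd_epi16 on 16-bit pairs).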
    prepare_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_x);
    prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_y);

    for (j = 0; j < w; j += 8) {
        /* Horizontal filter */
        {
            for (i = 0; i < im_h; i += 2) {
                const __m256i row0 = _mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]);
                __m256i       row1 = _mm256_set1_epi16(0);
                if (i + 1 < im_h)
                    row1 = _mm256_loadu_si256((__m256i *)&src_ptr[(i + 1) * src_stride + j]);

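                // Interleave the two rows so each 128-bit lane of r0/r1 holds
                // the low/high half of one row; the alignr shifts below then
                // form the sliding filter windows for both rows at once (lane 0
                // covers row i, lane 1 covers row i + 1).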
                const __m256i r0 = _mm256_permute2x128_si256(row0, row1, 0x20);
                const __m256i r1 = _mm256_permute2x128_si256(row0, row1, 0x31);

                // even pixels
                s[0] = _mm256_alignr_epi8(r1, r0, 0);
                s[1] = _mm256_alignr_epi8(r1, r0, 4);
                s[2] = _mm256_alignr_epi8(r1, r0, 8);
                s[3] = _mm256_alignr_epi8(r1, r0, 12);

                __m256i res_even = convolve16_8tap_avx2(s, coeffs_x);
                res_even         = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),
                                            round_shift_x);

                // odd pixels
                s[0] = _mm256_alignr_epi8(r1, r0, 2);
                s[1] = _mm256_alignr_epi8(r1, r0, 6);
                s[2] = _mm256_alignr_epi8(r1, r0, 10);
                s[3] = _mm256_alignr_epi8(r1, r0, 14);

                __m256i res_odd = convolve16_8tap_avx2(s, coeffs_x);
                res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x), round_shift_x);

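                // Saturating-pack the 32-bit sums to 16 bits and interleave
                // even/odd results back into pixel order; lane 0 holds row i
                // and lane 1 row i + 1, so one store writes both intermediate
                // rows (im_stride is 8).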
                __m256i res_even1 = _mm256_packs_epi32(res_even, res_even);
                __m256i res_odd1  = _mm256_packs_epi32(res_odd, res_odd);
                __m256i res       = _mm256_unpacklo_epi16(res_even1, res_odd1);

                _mm256_storeu_si256((__m256i *)&im_block[i * im_stride], res);
            }
        }

        /* Vertical filter */
        {
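            // Prime the 8-row sliding window: each 256-bit load spans two
            // adjacent intermediate rows, so lane 0 ends up filtering output
            // row i while lane 1 filters output row i + 1.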
            __m256i s0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));
            __m256i s1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));
            __m256i s2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));
            __m256i s3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));
            __m256i s4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));
            __m256i s5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));

            s[0] = _mm256_unpacklo_epi16(s0, s1);
            s[1] = _mm256_unpacklo_epi16(s2, s3);
            s[2] = _mm256_unpacklo_epi16(s4, s5);

            s[4] = _mm256_unpackhi_epi16(s0, s1);
            s[5] = _mm256_unpackhi_epi16(s2, s3);
            s[6] = _mm256_unpackhi_epi16(s4, s5);

            for (i = 0; i < h; i += 2) {
                const int16_t *data = &im_block[i * im_stride];

                const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 6 * im_stride));
                const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 7 * im_stride));

                s[3] = _mm256_unpacklo_epi16(s6, s7);
                s[7] = _mm256_unpackhi_epi16(s6, s7);

                const __m256i res_a       = convolve16_8tap_avx2(s, coeffs_y);
                __m256i       res_a_round = _mm256_sra_epi32(_mm256_add_epi32(res_a, round_const_y),
                                                       round_shift_y);

                res_a_round = _mm256_sra_epi32(_mm256_add_epi32(res_a_round, round_const_bits),
                                               round_shift_bits);

                if (w - j > 4) {
                    const __m256i res_b       = convolve16_8tap_avx2(s + 4, coeffs_y);
                    __m256i       res_b_round = _mm256_sra_epi32(
                        _mm256_add_epi32(res_b, round_const_y), round_shift_y);
                    res_b_round = _mm256_sra_epi32(_mm256_add_epi32(res_b_round, round_const_bits),
                                                   round_shift_bits);

                    __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
                    res_16bit         = _mm256_min_epi16(res_16bit, clp_pxl);
                    res_16bit         = _mm256_max_epi16(res_16bit, zero);

                    _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j],
                                     _mm256_castsi256_si128(res_16bit));
                    _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
                                     _mm256_extracti128_si256(res_16bit, 1));
                } else if (w == 4) {
                    res_a_round = _mm256_packs_epi32(res_a_round, res_a_round);
                    res_a_round = _mm256_min_epi16(res_a_round, clp_pxl);
                    res_a_round = _mm256_max_epi16(res_a_round, zero);

                    _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j],
                                     _mm256_castsi256_si128(res_a_round));
                    _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
                                     _mm256_extracti128_si256(res_a_round, 1));
                } else {
                    res_a_round = _mm256_packs_epi32(res_a_round, res_a_round);
                    res_a_round = _mm256_min_epi16(res_a_round, clp_pxl);
                    res_a_round = _mm256_max_epi16(res_a_round, zero);

                    xx_storel_32((__m128i *)&dst[i * dst_stride + j],
                                 _mm256_castsi256_si128(res_a_round));
                    xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
                                 _mm256_extracti128_si256(res_a_round, 1));
                }

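                // Slide the row window down by two rows for the next iteration.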
                s[0] = s[1];
                s[1] = s[2];
                s[2] = s[3];

                s[4] = s[5];
                s[5] = s[6];
                s[6] = s[7];
            }
        }
    }
}

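// Copy one row of 64 16-bit pixels with four unaligned 256-bit loads/stores.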
static INLINE void copy_64(const uint16_t *src, uint16_t *dst) {
    __m256i s[4];
    s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 16));
    s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 16));
    s[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 16));
    s[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 16));
    _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);
    _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);
    _mm256_storeu_si256((__m256i *)(dst + 2 * 16), s[2]);
    _mm256_storeu_si256((__m256i *)(dst + 3 * 16), s[3]);
}

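// Copy one row of 128 16-bit pixels with eight unaligned 256-bit loads/stores.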
static INLINE void copy_128(const uint16_t *src, uint16_t *dst) {
    __m256i s[8];
    s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 16));
    s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 16));
    s[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 16));
    s[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 16));
    s[4] = _mm256_loadu_si256((__m256i *)(src + 4 * 16));
    s[5] = _mm256_loadu_si256((__m256i *)(src + 5 * 16));
    s[6] = _mm256_loadu_si256((__m256i *)(src + 6 * 16));
    s[7] = _mm256_loadu_si256((__m256i *)(src + 7 * 16));

    _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);
    _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);
    _mm256_storeu_si256((__m256i *)(dst + 2 * 16), s[2]);
    _mm256_storeu_si256((__m256i *)(dst + 3 * 16), s[3]);
    _mm256_storeu_si256((__m256i *)(dst + 4 * 16), s[4]);
    _mm256_storeu_si256((__m256i *)(dst + 5 * 16), s[5]);
    _mm256_storeu_si256((__m256i *)(dst + 6 * 16), s[6]);
    _mm256_storeu_si256((__m256i *)(dst + 7 * 16), s[7]);
}

void svt_av1_highbd_convolve_2d_copy_sr_avx2(const uint16_t *src, int32_t src_stride, uint16_t *dst,
                                             int32_t dst_stride, int32_t w, int32_t h,
                                             const InterpFilterParams *filter_params_x,
                                             const InterpFilterParams *filter_params_y,
                                             const int32_t subpel_x_q4, const int32_t subpel_y_q4,
                                             ConvolveParams *conv_params, int32_t bd) {
    (void)filter_params_x;
    (void)filter_params_y;
    (void)subpel_x_q4;
    (void)subpel_y_q4;
    (void)conv_params;
    (void)bd;

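    // Pure copy for the zero-phase case: each width branch moves two rows per
    // iteration (all AV1 block heights are even).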
    if (w == 2) {
        do {
            svt_memcpy_intrin_sse(dst, src, 2 * sizeof(*src));
            src += src_stride;
            dst += dst_stride;
            svt_memcpy_intrin_sse(dst, src, 2 * sizeof(*src));
            src += src_stride;
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else if (w == 4) {
        do {
            __m128i s[2];
            s[0] = _mm_loadl_epi64((__m128i *)src);
            src += src_stride;
            s[1] = _mm_loadl_epi64((__m128i *)src);
            src += src_stride;
            _mm_storel_epi64((__m128i *)dst, s[0]);
            dst += dst_stride;
            _mm_storel_epi64((__m128i *)dst, s[1]);
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else if (w == 8) {
        do {
            __m128i s[2];
            s[0] = _mm_loadu_si128((__m128i *)src);
            src += src_stride;
            s[1] = _mm_loadu_si128((__m128i *)src);
            src += src_stride;
            _mm_storeu_si128((__m128i *)dst, s[0]);
            dst += dst_stride;
            _mm_storeu_si128((__m128i *)dst, s[1]);
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else if (w == 16) {
        do {
            __m256i s[2];
            s[0] = _mm256_loadu_si256((__m256i *)src);
            src += src_stride;
            s[1] = _mm256_loadu_si256((__m256i *)src);
            src += src_stride;
            _mm256_storeu_si256((__m256i *)dst, s[0]);
            dst += dst_stride;
            _mm256_storeu_si256((__m256i *)dst, s[1]);
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else if (w == 32) {
        do {
            __m256i s[4];
            s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 16));
            s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 16));
            src += src_stride;
            s[2] = _mm256_loadu_si256((__m256i *)(src + 0 * 16));
            s[3] = _mm256_loadu_si256((__m256i *)(src + 1 * 16));
            src += src_stride;
            _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);
            _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);
            dst += dst_stride;
            _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[2]);
            _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[3]);
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else if (w == 64) {
        do {
            copy_64(src, dst);
            src += src_stride;
            dst += dst_stride;
            copy_64(src, dst);
            src += src_stride;
            dst += dst_stride;
            h -= 2;
        } while (h);
    } else {
        do {
            copy_128(src, dst);
            src += src_stride;
            dst += dst_stride;
            copy_128(src, dst);
            src += src_stride;
            dst += dst_stride;
            h -= 2;
        } while (h);
    }
}