/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <immintrin.h>

#include "config/av1_rtcd.h"

#include "aom_dsp/x86/convolve_avx2.h"
#include "aom_dsp/x86/convolve_common_intrin.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/synonyms.h"
#include "av1/common/convolve.h"

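// 2D sub-pixel convolution for 8-bit pixels: a horizontal filter pass writes
// rounded 16-bit intermediates into im_block, then a vertical pass filters
// those intermediates and rounds the final sums back down to 8 bits.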
void av1_convolve_2d_sr_avx2(const uint8_t *src, int src_stride, uint8_t *dst,
                             int dst_stride, int w, int h,
                             const InterpFilterParams *filter_params_x,
                             const InterpFilterParams *filter_params_y,
                             const int subpel_x_q4, const int subpel_y_q4,
                             ConvolveParams *conv_params) {
  const int bd = 8;

  DECLARE_ALIGNED(32, int16_t, im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * 8]);
  int im_h = h + filter_params_y->taps - 1;
  int im_stride = 8;
  int i, j;
  const int fo_vert = filter_params_y->taps / 2 - 1;
  const int fo_horiz = filter_params_x->taps / 2 - 1;
  const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

  const int bits =
      FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;

  __m256i filt[4], coeffs_h[4], coeffs_v[4];

  assert(conv_params->round_0 > 0);

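  // Byte-shuffle masks consumed by convolve_lowbd_x(): they replicate the
  // source pixels so each pair of filter taps lines up with its inputs.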
  filt[0] = _mm256_load_si256((__m256i const *)filt_global_avx2);
  filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
  filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
  filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));

  prepare_coeffs_lowbd(filter_params_x, subpel_x_q4, coeffs_h);
  prepare_coeffs(filter_params_y, subpel_y_q4, coeffs_v);

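  // Rounding set-up. The horizontal pass shifts by (round_0 - 1) because
  // prepare_coeffs_lowbd() right-shifts the (even) filter taps by 1; the
  // extra bit is recovered here. The vertical pass shifts by round_1, and
  // the final shift by `bits` removes the remaining intermediate offset.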
  const __m256i round_const_h = _mm256_set1_epi16(
      ((1 << (conv_params->round_0 - 1)) >> 1) + (1 << (bd + FILTER_BITS - 2)));
  const __m128i round_shift_h = _mm_cvtsi32_si128(conv_params->round_0 - 1);

  const __m256i sum_round_v = _mm256_set1_epi32(
      (1 << offset_bits) + ((1 << conv_params->round_1) >> 1));
  const __m128i sum_shift_v = _mm_cvtsi32_si128(conv_params->round_1);

  const __m256i round_const_v = _mm256_set1_epi32(
      ((1 << bits) >> 1) - (1 << (offset_bits - conv_params->round_1)) -
      ((1 << (offset_bits - conv_params->round_1)) >> 1));
  const __m128i round_shift_v = _mm_cvtsi32_si128(bits);

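  // Process the block in 8-pixel-wide column strips; im_block holds the
  // horizontally filtered 16-bit intermediate for the current strip.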
  for (j = 0; j < w; j += 8) {
    for (i = 0; i < im_h; i += 2) {
      __m256i data = _mm256_castsi128_si256(
          _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));

      // Load the next line
      if (i + 1 < im_h)
        data = _mm256_inserti128_si256(
            data,
            _mm_loadu_si128(
                (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),
            1);

      __m256i res = convolve_lowbd_x(data, coeffs_h, filt);

      res =
          _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h);

      _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);
    }

    /* Vertical filter */
    {
      __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));
      __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));
      __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));
      __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));
      __m256i src_4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));
      __m256i src_5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));

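      // im_stride is 8 int16s (16 bytes), so each 256-bit load grabs two
      // adjacent rows of im_block; the two 128-bit lanes compute output rows
      // i and i + 1 in parallel.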
      __m256i s[8];
      s[0] = _mm256_unpacklo_epi16(src_0, src_1);
      s[1] = _mm256_unpacklo_epi16(src_2, src_3);
      s[2] = _mm256_unpacklo_epi16(src_4, src_5);

      s[4] = _mm256_unpackhi_epi16(src_0, src_1);
      s[5] = _mm256_unpackhi_epi16(src_2, src_3);
      s[6] = _mm256_unpackhi_epi16(src_4, src_5);

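      // 8-row sliding window of interleaved row pairs: s[0..3] hold pixels
      // 0-3 of each lane, s[4..7] hold pixels 4-7.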
      for (i = 0; i < h; i += 2) {
        const int16_t *data = &im_block[i * im_stride];

        const __m256i s6 =
            _mm256_loadu_si256((__m256i *)(data + 6 * im_stride));
        const __m256i s7 =
            _mm256_loadu_si256((__m256i *)(data + 7 * im_stride));

        s[3] = _mm256_unpacklo_epi16(s6, s7);
        s[7] = _mm256_unpackhi_epi16(s6, s7);

        __m256i res_a = convolve(s, coeffs_v);
        __m256i res_b = convolve(s + 4, coeffs_v);

        // Combine V round and 2F-H-V round into a single rounding
        res_a =
            _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);
        res_b =
            _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);

        const __m256i res_a_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_a, round_const_v), round_shift_v);
        const __m256i res_b_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_b, round_const_v), round_shift_v);

        /* rounding code */
        // 16 bit conversion
        const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
        // 8 bit conversion and saturation to uint8
        const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);


        const __m128i res_0 = _mm256_castsi256_si128(res_8b);
        const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);

        // Store values into the destination buffer
        __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
        __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];
        if (w - j > 4) {
          _mm_storel_epi64(p_0, res_0);
          _mm_storel_epi64(p_1, res_1);
        } else if (w == 4) {
          xx_storel_32(p_0, res_0);
          xx_storel_32(p_1, res_1);
        } else {
          *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
          *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
        }

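        // Slide the interleaved row window down by two rows for the next
        // pair of output rows.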
        s[0] = s[1];
        s[1] = s[2];
        s[2] = s[3];

        s[4] = s[5];
        s[5] = s[6];
        s[6] = s[7];
      }
    }
  }
}

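// Copy one 128-pixel row using four 32-byte unaligned loads and stores.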
static INLINE void copy_128(const uint8_t *src, uint8_t *dst) {
  __m256i s[4];
  s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
  s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
  s[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 32));
  s[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 32));
  _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);
  _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);
  _mm256_storeu_si256((__m256i *)(dst + 2 * 32), s[2]);
  _mm256_storeu_si256((__m256i *)(dst + 3 * 32), s[3]);
}

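// Plain copy for the zero-subpel case: no filtering is performed, so all
// filter and subpel parameters are unused.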
void av1_convolve_2d_copy_sr_avx2(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int dst_stride, int w, int h,
                                  const InterpFilterParams *filter_params_x,
                                  const InterpFilterParams *filter_params_y,
                                  const int subpel_x_q4, const int subpel_y_q4,
                                  ConvolveParams *conv_params) {
  (void)filter_params_x;
  (void)filter_params_y;
  (void)subpel_x_q4;
  (void)subpel_y_q4;
  (void)conv_params;

  if (w >= 16) {
    assert(!((intptr_t)dst % 16));
    assert(!(dst_stride % 16));
  }

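  // Block heights are even, so every branch below copies two rows per
  // iteration. The w == 16 path uses aligned stores, which the asserts
  // above justify.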
  if (w == 2) {
    do {
      memcpy(dst, src, 2 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      memcpy(dst, src, 2 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 4) {
    do {
      memcpy(dst, src, 4 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      memcpy(dst, src, 4 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 8) {
    do {
      __m128i s[2];
      s[0] = _mm_loadl_epi64((__m128i *)src);
      src += src_stride;
      s[1] = _mm_loadl_epi64((__m128i *)src);
      src += src_stride;
      _mm_storel_epi64((__m128i *)dst, s[0]);
      dst += dst_stride;
      _mm_storel_epi64((__m128i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 16) {
    do {
      __m128i s[2];
      s[0] = _mm_loadu_si128((__m128i *)src);
      src += src_stride;
      s[1] = _mm_loadu_si128((__m128i *)src);
      src += src_stride;
      _mm_store_si128((__m128i *)dst, s[0]);
      dst += dst_stride;
      _mm_store_si128((__m128i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 32) {
    do {
      __m256i s[2];
      s[0] = _mm256_loadu_si256((__m256i *)src);
      src += src_stride;
      s[1] = _mm256_loadu_si256((__m256i *)src);
      src += src_stride;
      _mm256_storeu_si256((__m256i *)dst, s[0]);
      dst += dst_stride;
      _mm256_storeu_si256((__m256i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 64) {
    do {
      __m256i s[4];
      s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
      s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
      src += src_stride;
      s[2] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
      s[3] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
      src += src_stride;
      _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);
      _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);
      dst += dst_stride;
      _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[2]);
      _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[3]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else {
    do {
      copy_128(src, dst);
      src += src_stride;
      dst += dst_stride;
      copy_128(src, dst);
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  }
}