/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>  // SSE2

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"

#include "aom_dsp/blend.h"
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/synonyms.h"

#include "aom_ports/mem.h"

#include "av1/common/av1_common_int.h"
#include "av1/common/filter.h"
#include "av1/common/reconinter.h"
#include "av1/encoder/reconinter_enc.h"

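// Computes the sum of squares of 256 int16 samples (32 iterations of 8 lanes
// each); _mm_madd_epi16 squares each lane and pairwise-adds into 32-bit
// accumulators, which are then folded down to a single 32-bit result.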
unsigned int aom_get_mb_ss_sse2(const int16_t *src) {
  __m128i vsum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 32; ++i) {
    const __m128i v = xx_loadu_128(src);
    vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
    src += 8;
  }

  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
  return _mm_cvtsi128_si32(vsum);
}

static INLINE __m128i load4x2_sse2(const uint8_t *const p, const int stride) {
  const __m128i p0 = _mm_cvtsi32_si128(loadu_uint32(p + 0 * stride));
  const __m128i p1 = _mm_cvtsi32_si128(loadu_uint32(p + 1 * stride));
  return _mm_unpacklo_epi8(_mm_unpacklo_epi32(p0, p1), _mm_setzero_si128());
}

static INLINE __m128i load8_8to16_sse2(const uint8_t *const p) {
  const __m128i p0 = _mm_loadl_epi64((const __m128i *)p);
  return _mm_unpacklo_epi8(p0, _mm_setzero_si128());
}

// Accumulate the 4 32-bit values in val into a single 32-bit result.
static INLINE unsigned int add32x4_sse2(__m128i val) {
  val = _mm_add_epi32(val, _mm_srli_si128(val, 8));
  val = _mm_add_epi32(val, _mm_srli_si128(val, 4));
  return _mm_cvtsi128_si32(val);
}

// Sign-extend the 8 16-bit values in sum and accumulate them into 4 32-bit
// values.
static INLINE __m128i sum_to_32bit_sse2(const __m128i sum) {
  const __m128i sum_lo = _mm_srai_epi32(_mm_unpacklo_epi16(sum, sum), 16);
  const __m128i sum_hi = _mm_srai_epi32(_mm_unpackhi_epi16(sum, sum), 16);
  return _mm_add_epi32(sum_lo, sum_hi);
}

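// Accumulates the per-pixel differences for 8 pixel pairs: *sum collects the
// signed diffs in 16-bit lanes, while *sse collects the squared diffs in
// 32-bit lanes via _mm_madd_epi16.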
static INLINE void variance_kernel_sse2(const __m128i src, const __m128i ref,
                                        __m128i *const sse,
                                        __m128i *const sum) {
  const __m128i diff = _mm_sub_epi16(src, ref);
  *sse = _mm_add_epi32(*sse, _mm_madd_epi16(diff, diff));
  *sum = _mm_add_epi16(*sum, diff);
}

// Can handle 128 pixels' diff sum (such as 8x16 or 16x8).
// Slightly faster than variance_final_256_pel_sse2().
// The diff sum of 128 pixels still fits in a 16-bit integer:
// 128 * 255 = 32640 <= INT16_MAX.
static INLINE void variance_final_128_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
}

// Can handle 256 pixels' diff sum (such as 16x16)
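// After folding the vector in half twice, each of the two remaining 16-bit
// lanes holds the diff sum of 128 pixels, so both stay within int16 range;
// they are extracted separately and added as ints.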
static INLINE void variance_final_256_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
  *sum += (int16_t)_mm_extract_epi16(vsum, 1);
}

// Can handle 512 pixels' diff sum (such as 16x32 or 32x16)
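// Only one 16-bit fold is safe here (each lane then covers at most 128
// pixels); the surviving lanes are sign-extended to 32 bits before the final
// reduction.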
static INLINE void variance_final_512_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_unpacklo_epi16(vsum, vsum);
  vsum = _mm_srai_epi32(vsum, 16);
  *sum = add32x4_sse2(vsum);
}

// Can handle 1024 pixels' diff sum (such as 32x32)
static INLINE void variance_final_1024_pel_sse2(__m128i vsse, __m128i vsum,
                                                unsigned int *const sse,
                                                int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = sum_to_32bit_sse2(vsum);
  *sum = add32x4_sse2(vsum);
}

static INLINE void variance4_sse2(const uint8_t *src, const int src_stride,
                                  const uint8_t *ref, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  assert(h <= 256);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; i += 2) {
    const __m128i s = load4x2_sse2(src, src_stride);
    const __m128i r = load4x2_sse2(ref, ref_stride);

    variance_kernel_sse2(s, r, sse, sum);
    src += 2 * src_stride;
    ref += 2 * ref_stride;
  }
}

static INLINE void variance8_sse2(const uint8_t *src, const int src_stride,
                                  const uint8_t *ref, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  assert(h <= 128);  // May overflow for larger height.
  *sum = _mm_setzero_si128();
  *sse = _mm_setzero_si128();
  for (int i = 0; i < h; i++) {
    const __m128i s = load8_8to16_sse2(src);
    const __m128i r = load8_8to16_sse2(ref);

    variance_kernel_sse2(s, r, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance16_kernel_sse2(const uint8_t *const src,
                                          const uint8_t *const ref,
                                          __m128i *const sse,
                                          __m128i *const sum) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i s = _mm_loadu_si128((const __m128i *)src);
  const __m128i r = _mm_loadu_si128((const __m128i *)ref);
  const __m128i src0 = _mm_unpacklo_epi8(s, zero);
  const __m128i ref0 = _mm_unpacklo_epi8(r, zero);
  const __m128i src1 = _mm_unpackhi_epi8(s, zero);
  const __m128i ref1 = _mm_unpackhi_epi8(r, zero);

  variance_kernel_sse2(src0, ref0, sse, sum);
  variance_kernel_sse2(src1, ref1, sse, sum);
}

static INLINE void variance16_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 64);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src, ref, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance32_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 32);  // May overflow for larger height.
  // Don't initialize sse here since it's an accumulation.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
    variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance64_sse2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  assert(h <= 16);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
    variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
    variance16_kernel_sse2(src + 32, ref + 32, sse, sum);
    variance16_kernel_sse2(src + 48, ref + 48, sse, sum);
    src += src_stride;
    ref += ref_stride;
  }
}

static INLINE void variance128_sse2(const uint8_t *src, const int src_stride,
                                    const uint8_t *ref, const int ref_stride,
                                    const int h, __m128i *const sse,
                                    __m128i *const sum) {
  assert(h <= 8);  // May overflow for larger height.
  *sum = _mm_setzero_si128();

  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < 4; ++j) {
      const int offset0 = j << 5;
      const int offset1 = offset0 + 16;
      variance16_kernel_sse2(src + offset0, ref + offset0, sse, sum);
      variance16_kernel_sse2(src + offset1, ref + offset1, sse, sum);
    }
    src += src_stride;
    ref += ref_stride;
  }
}

void aom_get8x8var_sse2(const uint8_t *src_ptr, int src_stride,
                        const uint8_t *ref_ptr, int ref_stride,
                        unsigned int *sse, int *sum) {
  __m128i vsse, vsum;
  variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, sum);
}

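// variance = sse - sum^2 / (bw * bh); since bw and bh are powers of two, the
// division is a right shift by bits = log2(bw * bh). The 64-bit cast keeps
// sum * sum from overflowing for the largest blocks.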
#define AOM_VAR_NO_LOOP_SSE2(bw, bh, bits, max_pixels)                        \
  unsigned int aom_variance##bw##x##bh##_sse2(                                \
      const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
      unsigned int *sse) {                                                    \
    __m128i vsse = _mm_setzero_si128();                                       \
    __m128i vsum;                                                             \
    int sum = 0;                                                              \
    variance##bw##_sse2(src, src_stride, ref, ref_stride, bh, &vsse, &vsum);  \
    variance_final_##max_pixels##_pel_sse2(vsse, vsum, sse, &sum);            \
    assert(sum <= 255 * bw * bh);                                             \
    assert(sum >= -255 * bw * bh);                                            \
    return *sse - (uint32_t)(((int64_t)sum * sum) >> bits);                   \
  }

AOM_VAR_NO_LOOP_SSE2(4, 4, 4, 128);
AOM_VAR_NO_LOOP_SSE2(4, 8, 5, 128);
AOM_VAR_NO_LOOP_SSE2(4, 16, 6, 128);

AOM_VAR_NO_LOOP_SSE2(8, 4, 5, 128);
AOM_VAR_NO_LOOP_SSE2(8, 8, 6, 128);
AOM_VAR_NO_LOOP_SSE2(8, 16, 7, 128);

AOM_VAR_NO_LOOP_SSE2(16, 8, 7, 128);
AOM_VAR_NO_LOOP_SSE2(16, 16, 8, 256);
AOM_VAR_NO_LOOP_SSE2(16, 32, 9, 512);

AOM_VAR_NO_LOOP_SSE2(32, 8, 8, 256);
AOM_VAR_NO_LOOP_SSE2(32, 16, 9, 512);
AOM_VAR_NO_LOOP_SSE2(32, 32, 10, 1024);

#if !CONFIG_REALTIME_ONLY
AOM_VAR_NO_LOOP_SSE2(16, 4, 6, 128);
AOM_VAR_NO_LOOP_SSE2(8, 32, 8, 256);
AOM_VAR_NO_LOOP_SSE2(16, 64, 10, 1024);
#endif

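// For blocks with more than 1024 pixels the 16-bit lanes of vsum would
// overflow, so the rows are processed uh at a time and each chunk's diff sum
// is widened to 32 bits before being accumulated.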
#define AOM_VAR_LOOP_SSE2(bw, bh, bits, uh)                                   \
  unsigned int aom_variance##bw##x##bh##_sse2(                                \
      const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
      unsigned int *sse) {                                                    \
    __m128i vsse = _mm_setzero_si128();                                       \
    __m128i vsum = _mm_setzero_si128();                                       \
    for (int i = 0; i < (bh / uh); ++i) {                                     \
      __m128i vsum16;                                                         \
      variance##bw##_sse2(src, src_stride, ref, ref_stride, uh, &vsse,        \
                          &vsum16);                                           \
      vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));                  \
      src += (src_stride * uh);                                               \
      ref += (ref_stride * uh);                                               \
    }                                                                         \
    *sse = add32x4_sse2(vsse);                                                \
    int sum = add32x4_sse2(vsum);                                             \
    assert(sum <= 255 * bw * bh);                                             \
    assert(sum >= -255 * bw * bh);                                            \
    return *sse - (uint32_t)(((int64_t)sum * sum) >> bits);                   \
  }

AOM_VAR_LOOP_SSE2(32, 64, 11, 32);  // 32x32 * ( 64/32 )

AOM_VAR_LOOP_SSE2(64, 32, 11, 16);   // 64x16 * ( 32/16 )
AOM_VAR_LOOP_SSE2(64, 64, 12, 16);   // 64x16 * ( 64/16 )
AOM_VAR_LOOP_SSE2(64, 128, 13, 16);  // 64x16 * ( 128/16 )

AOM_VAR_LOOP_SSE2(128, 64, 13, 8);   // 128x8 * ( 64/8 )
AOM_VAR_LOOP_SSE2(128, 128, 14, 8);  // 128x8 * ( 128/8 )

#if !CONFIG_REALTIME_ONLY
AOM_VAR_NO_LOOP_SSE2(64, 16, 10, 1024);
#endif

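// The MSE kernels reuse the variance helpers and return the raw SSE without
// subtracting the squared-mean term.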
unsigned int aom_mse8x8_sse2(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse) {
  aom_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse8x16_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  aom_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse16x8_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  aom_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
  aom_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

// The 2 unused parameters are placeholders for PIC-enabled builds.
// These declarations are for functions defined in subpel_variance.asm.
#define DECL(w, opt)                                                        \
  int aom_sub_pixel_variance##w##xh_##opt(                                  \
      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
      const uint8_t *dst, ptrdiff_t dst_stride, int height,                 \
      unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt) \
  DECL(4, opt);    \
  DECL(8, opt);    \
  DECL(16, opt)

DECLS(sse2);
DECLS(ssse3);
#undef DECLS
#undef DECL

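// Covers a bw x bh block by tiling calls to the wf-wide asm helper: columns
// advance by wf pixels and rows by hf (height capped at 64 to avoid overflow
// in the helper), accumulating the diff sum and SSE of every tile before
// applying the variance formula.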
#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                      \
  unsigned int aom_sub_pixel_variance##w##x##h##_##opt(                       \
      const uint8_t *src, int src_stride, int x_offset, int y_offset,         \
      const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) {            \
    /*Avoid overflow in helper by capping height.*/                           \
    const int hf = AOMMIN(h, 64);                                             \
    unsigned int sse = 0;                                                     \
    int se = 0;                                                               \
    for (int i = 0; i < (w / wf); ++i) {                                      \
      const uint8_t *src_ptr = src;                                           \
      const uint8_t *dst_ptr = dst;                                           \
      for (int j = 0; j < (h / hf); ++j) {                                    \
        unsigned int sse2;                                                    \
        const int se2 = aom_sub_pixel_variance##wf##xh_##opt(                 \
            src_ptr, src_stride, x_offset, y_offset, dst_ptr, dst_stride, hf, \
            &sse2, NULL, NULL);                                               \
        dst_ptr += hf * dst_stride;                                           \
        src_ptr += hf * src_stride;                                           \
        se += se2;                                                            \
        sse += sse2;                                                          \
      }                                                                       \
      src += wf;                                                              \
      dst += wf;                                                              \
    }                                                                         \
    *sse_ptr = sse;                                                           \
    return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2));  \
  }

#if !CONFIG_REALTIME_ONLY
#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t));    \
  FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t));     \
  FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t));      \
  FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t));      \
  FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t));      \
  FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t));      \
  FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t));     \
  FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t));    \
  FN(8, 32, 8, 3, 5, opt, (uint32_t), (int64_t));    \
  FN(32, 8, 16, 5, 3, opt, (uint32_t), (int64_t));   \
  FN(16, 64, 16, 4, 6, opt, (int64_t), (int64_t));   \
  FN(64, 16, 16, 6, 4, opt, (int64_t), (int64_t))
#else
#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t));    \
  FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t));     \
  FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t));      \
  FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t));      \
  FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t));      \
  FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t));
#endif

FNS(sse2);
FNS(ssse3);

#undef FNS
#undef FN

// The 2 unused parameters are placeholders for PIC-enabled builds.
#define DECL(w, opt)                                                        \
  int aom_sub_pixel_avg_variance##w##xh_##opt(                              \
      const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
      const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec,         \
      ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0,   \
      void *unused)
#define DECLS(opt) \
  DECL(4, opt);    \
  DECL(8, opt);    \
  DECL(16, opt)

DECLS(sse2);
DECLS(ssse3);
#undef DECL
#undef DECLS

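// Same tiling scheme as the plain sub-pixel variance above, with the second
// predictor sec (laid out w pixels wide) advanced in lockstep with each tile.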
#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                     \
  unsigned int aom_sub_pixel_avg_variance##w##x##h##_##opt(                  \
      const uint8_t *src, int src_stride, int x_offset, int y_offset,        \
      const uint8_t *dst, int dst_stride, unsigned int *sse_ptr,             \
      const uint8_t *sec) {                                                  \
    /*Avoid overflow in helper by capping height.*/                          \
    const int hf = AOMMIN(h, 64);                                            \
    unsigned int sse = 0;                                                    \
    int se = 0;                                                              \
    for (int i = 0; i < (w / wf); ++i) {                                     \
      const uint8_t *src_ptr = src;                                          \
      const uint8_t *dst_ptr = dst;                                          \
      const uint8_t *sec_ptr = sec;                                          \
      for (int j = 0; j < (h / hf); ++j) {                                   \
        unsigned int sse2;                                                   \
        const int se2 = aom_sub_pixel_avg_variance##wf##xh_##opt(            \
            src_ptr, src_stride, x_offset, y_offset, dst_ptr, dst_stride,    \
            sec_ptr, w, hf, &sse2, NULL, NULL);                              \
        dst_ptr += hf * dst_stride;                                          \
        src_ptr += hf * src_stride;                                          \
        sec_ptr += hf * w;                                                   \
        se += se2;                                                           \
        sse += sse2;                                                         \
      }                                                                      \
      src += wf;                                                             \
      dst += wf;                                                             \
      sec += wf;                                                             \
    }                                                                        \
    *sse_ptr = sse;                                                          \
    return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
  }

#if !CONFIG_REALTIME_ONLY
#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t));   \
  FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t));    \
  FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t));     \
  FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t));     \
  FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t));     \
  FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t));     \
  FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t));     \
  FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t));    \
  FN(8, 32, 8, 3, 5, opt, (uint32_t), (int64_t));    \
  FN(32, 8, 16, 5, 3, opt, (uint32_t), (int64_t));   \
  FN(16, 64, 16, 4, 6, opt, (int64_t), (int64_t));   \
  FN(64, 16, 16, 6, 4, opt, (int64_t), (int64_t))
#else
#define FNS(opt)                                     \
  FN(128, 128, 16, 7, 7, opt, (int64_t), (int64_t)); \
  FN(128, 64, 16, 7, 6, opt, (int64_t), (int64_t));  \
  FN(64, 128, 16, 6, 7, opt, (int64_t), (int64_t));  \
  FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t));   \
  FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t));   \
  FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t));   \
  FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t));   \
  FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t));   \
  FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t));   \
  FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t));  \
  FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t));   \
  FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t));    \
  FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t));     \
  FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t));     \
  FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t));     \
  FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t));
#endif

FNS(sse2);
FNS(ssse3);

#undef FNS
#undef FN

void aom_upsampled_pred_sse2(MACROBLOCKD *xd, const struct AV1Common *const cm,
                             int mi_row, int mi_col, const MV *const mv,
                             uint8_t *comp_pred, int width, int height,
                             int subpel_x_q3, int subpel_y_q3,
                             const uint8_t *ref, int ref_stride,
                             int subpel_search) {
  // expect xd == NULL only in tests
  if (xd != NULL) {
    const MB_MODE_INFO *mi = xd->mi[0];
    const int ref_num = 0;
    const int is_intrabc = is_intrabc_block(mi);
    const struct scale_factors *const sf =
        is_intrabc ? &cm->sf_identity : xd->block_ref_scale_factors[ref_num];
    const int is_scaled = av1_is_scaled(sf);

    if (is_scaled) {
      int plane = 0;
      const int mi_x = mi_col * MI_SIZE;
      const int mi_y = mi_row * MI_SIZE;
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const struct buf_2d *const dst_buf = &pd->dst;
      const struct buf_2d *const pre_buf =
          is_intrabc ? dst_buf : &pd->pre[ref_num];

      InterPredParams inter_pred_params;
      inter_pred_params.conv_params = get_conv_params(0, plane, xd->bd);
      const int_interpfilters filters =
          av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
      av1_init_inter_params(
          &inter_pred_params, width, height, mi_y >> pd->subsampling_y,
          mi_x >> pd->subsampling_x, pd->subsampling_x, pd->subsampling_y,
          xd->bd, is_cur_buf_hbd(xd), is_intrabc, sf, pre_buf, filters);
      av1_enc_build_one_inter_predictor(comp_pred, width, mv,
                                        &inter_pred_params);
      return;
    }
  }

  const InterpFilterParams *filter = av1_get_filter(subpel_search);
  // TODO(yunqing): 2-tap case uses 4-tap functions since there is no SIMD for
  // 2-tap yet.
  int filter_taps = (subpel_search <= USE_4_TAPS) ? 4 : SUBPEL_TAPS;

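  // Four cases follow: integer-pel motion is a straight copy (16-, 8-, or
  // 4-pixel-wide paths), horizontal-only and vertical-only offsets use a
  // single 8-tap convolve pass, and a mixed offset filters horizontally into
  // a temp buffer and then vertically out of it.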
  if (!subpel_x_q3 && !subpel_y_q3) {
    if (width >= 16) {
      int i;
      assert(!(width & 15));
      /*Read 16 pixels one row at a time.*/
      for (i = 0; i < height; i++) {
        int j;
        for (j = 0; j < width; j += 16) {
          xx_storeu_128(comp_pred, xx_loadu_128(ref));
          comp_pred += 16;
          ref += 16;
        }
        ref += ref_stride - width;
      }
    } else if (width >= 8) {
      int i;
      assert(!(width & 7));
      assert(!(height & 1));
      /*Read 8 pixels two rows at a time.*/
      for (i = 0; i < height; i += 2) {
        __m128i s0 = xx_loadl_64(ref + 0 * ref_stride);
        __m128i s1 = xx_loadl_64(ref + 1 * ref_stride);
        xx_storeu_128(comp_pred, _mm_unpacklo_epi64(s0, s1));
        comp_pred += 16;
        ref += 2 * ref_stride;
      }
    } else {
      int i;
      assert(!(width & 3));
      assert(!(height & 3));
      /*Read 4 pixels four rows at a time.*/
      for (i = 0; i < height; i += 4) {
        const __m128i row0 = xx_loadl_64(ref + 0 * ref_stride);
        const __m128i row1 = xx_loadl_64(ref + 1 * ref_stride);
        const __m128i row2 = xx_loadl_64(ref + 2 * ref_stride);
        const __m128i row3 = xx_loadl_64(ref + 3 * ref_stride);
        const __m128i reg = _mm_unpacklo_epi64(_mm_unpacklo_epi32(row0, row1),
                                               _mm_unpacklo_epi32(row2, row3));
        xx_storeu_128(comp_pred, reg);
        comp_pred += 16;
        ref += 4 * ref_stride;
      }
    }
  } else if (!subpel_y_q3) {
    const int16_t *const kernel =
        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
    aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL,
                        -1, width, height);
  } else if (!subpel_x_q3) {
    const int16_t *const kernel =
        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
    aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel, 16,
                       width, height);
  } else {
    DECLARE_ALIGNED(16, uint8_t,
                    temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
    const int16_t *const kernel_x =
        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
    const int16_t *const kernel_y =
        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
    const uint8_t *ref_start = ref - ref_stride * ((filter_taps >> 1) - 1);
    uint8_t *temp_start_horiz = (subpel_search <= USE_4_TAPS)
                                    ? temp + (filter_taps >> 1) * MAX_SB_SIZE
                                    : temp;
    uint8_t *temp_start_vert = temp + MAX_SB_SIZE * ((filter->taps >> 1) - 1);
    int intermediate_height =
        (((height - 1) * 8 + subpel_y_q3) >> 3) + filter_taps;
    assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
    aom_convolve8_horiz(ref_start, ref_stride, temp_start_horiz, MAX_SB_SIZE,
                        kernel_x, 16, NULL, -1, width, intermediate_height);
    aom_convolve8_vert(temp_start_vert, MAX_SB_SIZE, comp_pred, width, NULL,
                       -1, kernel_y, 16, width, height);
  }
}

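// Builds the upsampled prediction, then averages it with pred 16 bytes at a
// time; _mm_avg_epu8 computes the rounding average (a + b + 1) >> 1 per byte.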
void aom_comp_avg_upsampled_pred_sse2(
    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
    int ref_stride, int subpel_search) {
  int n;
  int i;
  aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
                     subpel_x_q3, subpel_y_q3, ref, ref_stride, subpel_search);
  /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
  assert(!(width * height & 15));
  n = width * height >> 4;
  for (i = 0; i < n; i++) {
    __m128i s0 = xx_loadu_128(comp_pred);
    __m128i p0 = xx_loadu_128(pred);
    xx_storeu_128(comp_pred, _mm_avg_epu8(s0, p0));
    comp_pred += 16;
    pred += 16;
  }
}

void aom_comp_mask_upsampled_pred_sse2(
    MACROBLOCKD *xd, const AV1_COMMON *const cm, int mi_row, int mi_col,
    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
    int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask,
    int subpel_search) {
  if (subpel_x_q3 | subpel_y_q3) {
    aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
                       subpel_x_q3, subpel_y_q3, ref, ref_stride,
                       subpel_search);
    ref = comp_pred;
    ref_stride = width;
  }
  aom_comp_mask_pred(comp_pred, pred, width, height, ref, ref_stride, mask,
                     mask_stride, invert_mask);
}

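// Blends one vector of high-bitdepth samples with a 64-based alpha mask:
//   comp = (a * s0 + (64 - a) * s1 + 32) >> AOM_BLEND_A64_ROUND_BITS
// Interleaving (s0, s1) with (a, 64 - a) lets _mm_madd_epi16 form both
// products and their sum in a single instruction.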
static INLINE __m128i highbd_comp_mask_pred_line_sse2(const __m128i s0,
                                                      const __m128i s1,
                                                      const __m128i a) {
  const __m128i alpha_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
  const __m128i round_const =
      _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
  const __m128i a_inv = _mm_sub_epi16(alpha_max, a);

  const __m128i s_lo = _mm_unpacklo_epi16(s0, s1);
  const __m128i a_lo = _mm_unpacklo_epi16(a, a_inv);
  const __m128i pred_lo = _mm_madd_epi16(s_lo, a_lo);
  const __m128i pred_l = _mm_srai_epi32(_mm_add_epi32(pred_lo, round_const),
                                        AOM_BLEND_A64_ROUND_BITS);

  const __m128i s_hi = _mm_unpackhi_epi16(s0, s1);
  const __m128i a_hi = _mm_unpackhi_epi16(a, a_inv);
  const __m128i pred_hi = _mm_madd_epi16(s_hi, a_hi);
  const __m128i pred_h = _mm_srai_epi32(_mm_add_epi32(pred_hi, round_const),
                                        AOM_BLEND_A64_ROUND_BITS);

  const __m128i comp = _mm_packs_epi32(pred_l, pred_h);

  return comp;
}

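// comp_pred = blend(ref, pred, mask); invert_mask swaps which source the
// mask weights apply to. Widths of 8 and 16 get dedicated loops; wider
// blocks are processed 32 pixels per row iteration.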
void aom_highbd_comp_mask_pred_sse2(uint8_t *comp_pred8, const uint8_t *pred8,
                                    int width, int height, const uint8_t *ref8,
                                    int ref_stride, const uint8_t *mask,
                                    int mask_stride, int invert_mask) {
  int i = 0;
  uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);
  uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
  const uint16_t *src0 = invert_mask ? pred : ref;
  const uint16_t *src1 = invert_mask ? ref : pred;
  const int stride0 = invert_mask ? width : ref_stride;
  const int stride1 = invert_mask ? ref_stride : width;
  const __m128i zero = _mm_setzero_si128();

  if (width == 8) {
    do {
      const __m128i s0 = _mm_loadu_si128((const __m128i *)(src0));
      const __m128i s1 = _mm_loadu_si128((const __m128i *)(src1));
      const __m128i m_8 = _mm_loadl_epi64((const __m128i *)mask);
      const __m128i m_16 = _mm_unpacklo_epi8(m_8, zero);

      const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m_16);

      _mm_storeu_si128((__m128i *)comp_pred, comp);

      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      comp_pred += width;
      i += 1;
    } while (i < height);
  } else if (width == 16) {
    do {
      const __m128i s0 = _mm_loadu_si128((const __m128i *)(src0));
      const __m128i s2 = _mm_loadu_si128((const __m128i *)(src0 + 8));
      const __m128i s1 = _mm_loadu_si128((const __m128i *)(src1));
      const __m128i s3 = _mm_loadu_si128((const __m128i *)(src1 + 8));

      const __m128i m_8 = _mm_loadu_si128((const __m128i *)mask);
      const __m128i m01_16 = _mm_unpacklo_epi8(m_8, zero);
      const __m128i m23_16 = _mm_unpackhi_epi8(m_8, zero);

      const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m01_16);
      const __m128i comp1 = highbd_comp_mask_pred_line_sse2(s2, s3, m23_16);

      _mm_storeu_si128((__m128i *)comp_pred, comp);
      _mm_storeu_si128((__m128i *)(comp_pred + 8), comp1);

      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      comp_pred += width;
      i += 1;
    } while (i < height);
  } else {
    do {
      for (int x = 0; x < width; x += 32) {
        for (int j = 0; j < 2; j++) {
          const __m128i s0 =
              _mm_loadu_si128((const __m128i *)(src0 + x + j * 16));
          const __m128i s2 =
              _mm_loadu_si128((const __m128i *)(src0 + x + 8 + j * 16));
          const __m128i s1 =
              _mm_loadu_si128((const __m128i *)(src1 + x + j * 16));
          const __m128i s3 =
              _mm_loadu_si128((const __m128i *)(src1 + x + 8 + j * 16));

          const __m128i m_8 =
              _mm_loadu_si128((const __m128i *)(mask + x + j * 16));
          const __m128i m01_16 = _mm_unpacklo_epi8(m_8, zero);
          const __m128i m23_16 = _mm_unpackhi_epi8(m_8, zero);

          const __m128i comp = highbd_comp_mask_pred_line_sse2(s0, s1, m01_16);
          const __m128i comp1 =
              highbd_comp_mask_pred_line_sse2(s2, s3, m23_16);

          _mm_storeu_si128((__m128i *)(comp_pred + j * 16), comp);
          _mm_storeu_si128((__m128i *)(comp_pred + 8 + j * 16), comp1);
        }
        comp_pred += 32;
      }
      src0 += stride0;
      src1 += stride1;
      mask += mask_stride;
      i += 1;
    } while (i < height);
  }
}

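// Sum of squared differences between 8-bit dst and 16-bit src over a 4-wide
// block of h rows, two rows per iteration. The 32-bit squares from
// _mm_madd_epi16 are widened and accumulated in 64-bit lanes so the running
// total cannot overflow.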
uint64_t aom_mse_4xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
                                int sstride, int h) {
  uint64_t sum = 0;
  __m128i dst0_8x8, dst1_8x8, dst_16x8;
  __m128i src0_16x4, src1_16x4, src_16x8;
  __m128i res0_32x4, res1_32x4, res0_64x4, res1_64x4, res2_64x4, res3_64x4;
  __m128i sub_result_16x8;
  const __m128i zeros = _mm_setzero_si128();
  __m128i square_result = _mm_setzero_si128();
  for (int i = 0; i < h; i += 2) {
    dst0_8x8 = _mm_cvtsi32_si128(*(uint32_t const *)(&dst[(i + 0) * dstride]));
    dst1_8x8 = _mm_cvtsi32_si128(*(uint32_t const *)(&dst[(i + 1) * dstride]));
    dst_16x8 =
        _mm_unpacklo_epi8(_mm_unpacklo_epi32(dst0_8x8, dst1_8x8), zeros);

    src0_16x4 = _mm_loadl_epi64((__m128i const *)(&src[(i + 0) * sstride]));
    src1_16x4 = _mm_loadl_epi64((__m128i const *)(&src[(i + 1) * sstride]));
    src_16x8 = _mm_unpacklo_epi64(src0_16x4, src1_16x4);

    sub_result_16x8 = _mm_sub_epi16(src_16x8, dst_16x8);

    res0_32x4 = _mm_unpacklo_epi16(sub_result_16x8, zeros);
    res1_32x4 = _mm_unpackhi_epi16(sub_result_16x8, zeros);

    res0_32x4 = _mm_madd_epi16(res0_32x4, res0_32x4);
    res1_32x4 = _mm_madd_epi16(res1_32x4, res1_32x4);

    res0_64x4 = _mm_unpacklo_epi32(res0_32x4, zeros);
    res1_64x4 = _mm_unpackhi_epi32(res0_32x4, zeros);
    res2_64x4 = _mm_unpacklo_epi32(res1_32x4, zeros);
    res3_64x4 = _mm_unpackhi_epi32(res1_32x4, zeros);

    square_result = _mm_add_epi64(
        square_result,
        _mm_add_epi64(
            _mm_add_epi64(_mm_add_epi64(res0_64x4, res1_64x4), res2_64x4),
            res3_64x4));
  }
  const __m128i sum_1x64 =
      _mm_add_epi64(square_result, _mm_srli_si128(square_result, 8));
  xx_storel_64(&sum, sum_1x64);
  return sum;
}

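// 8-wide variant of the routine above: one row of 8 pixels per iteration,
// with the same 64-bit accumulation.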
uint64_t aom_mse_8xh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
                                int sstride, int h) {
  uint64_t sum = 0;
  __m128i dst_8x8, dst_16x8;
  __m128i src_16x8;
  __m128i res0_32x4, res1_32x4, res0_64x4, res1_64x4, res2_64x4, res3_64x4;
  __m128i sub_result_16x8;
  const __m128i zeros = _mm_setzero_si128();
  __m128i square_result = _mm_setzero_si128();

  for (int i = 0; i < h; i++) {
    dst_8x8 = _mm_loadl_epi64((__m128i const *)(&dst[(i + 0) * dstride]));
    dst_16x8 = _mm_unpacklo_epi8(dst_8x8, zeros);

    src_16x8 = _mm_loadu_si128((__m128i *)&src[i * sstride]);

    sub_result_16x8 = _mm_sub_epi16(src_16x8, dst_16x8);

    res0_32x4 = _mm_unpacklo_epi16(sub_result_16x8, zeros);
    res1_32x4 = _mm_unpackhi_epi16(sub_result_16x8, zeros);

    res0_32x4 = _mm_madd_epi16(res0_32x4, res0_32x4);
    res1_32x4 = _mm_madd_epi16(res1_32x4, res1_32x4);

    res0_64x4 = _mm_unpacklo_epi32(res0_32x4, zeros);
    res1_64x4 = _mm_unpackhi_epi32(res0_32x4, zeros);
    res2_64x4 = _mm_unpacklo_epi32(res1_32x4, zeros);
    res3_64x4 = _mm_unpackhi_epi32(res1_32x4, zeros);

    square_result = _mm_add_epi64(
        square_result,
        _mm_add_epi64(
            _mm_add_epi64(_mm_add_epi64(res0_64x4, res1_64x4), res2_64x4),
            res3_64x4));
  }
  const __m128i sum_1x64 =
      _mm_add_epi64(square_result, _mm_srli_si128(square_result, 8));
  xx_storel_64(&sum, sum_1x64);
  return sum;
}

uint64_t aom_mse_wxh_16bit_sse2(uint8_t *dst, int dstride, uint16_t *src,
                                int sstride, int w, int h) {
  assert((w == 8 || w == 4) && (h == 8 || h == 4) &&
         "w=8/4 and h=8/4 must be satisfied");
  switch (w) {
    case 4: return aom_mse_4xh_16bit_sse2(dst, dstride, src, sstride, h);
    case 8: return aom_mse_8xh_16bit_sse2(dst, dstride, src, sstride, h);
    default: assert(0 && "unsupported width"); return -1;
  }
}