/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <immintrin.h>

#include "config/aom_config.h"

#include "aom_ports/mem.h"
#include "aom/aom_integer.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/obmc_intrinsic_sse4.h"

////////////////////////////////////////////////////////////////////////////////
// 8 bit
////////////////////////////////////////////////////////////////////////////////

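// Accumulates the sum of the rounded weighted differences
// (wsrc - pre * mask) >> 12 and the sum of their squares for a block of
// width w (a multiple of 8, at least 8) and height h, eight pixels at a time.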
static INLINE void obmc_variance_w8n(const uint8_t *pre, const int pre_stride,
                                     const int32_t *wsrc, const int32_t *mask,
                                     unsigned int *const sse, int *const sum,
                                     const int w, const int h) {
  int n = 0, width, height = h;
  __m128i v_sum_d = _mm_setzero_si128();
  __m128i v_sse_d = _mm_setzero_si128();
  const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
  __m128i v_d;
  const uint8_t *pre_temp;
  assert(w >= 8);
  assert(IS_POWER_OF_TWO(w));
  assert(IS_POWER_OF_TWO(h));
  do {
    width = w;
    pre_temp = pre;
    do {
      const __m128i v_p_b = _mm_loadl_epi64((const __m128i *)pre_temp);
      const __m256i v_m_d = _mm256_loadu_si256((__m256i const *)(mask + n));
      const __m256i v_w_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
      const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);

      // Values in both pre and mask fit in 15 bits, and are packed at 32 bit
      // boundaries. We use pmaddwd, as it has lower latency on Haswell
      // than pmulld but produces the same result with these inputs.
      const __m256i v_pm_d = _mm256_madd_epi16(v_p0_d, v_m_d);
      const __m256i v_diff0_d = _mm256_sub_epi32(v_w_d, v_pm_d);

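      // Round diff to 12 fractional bits; the sign mask corrects the bias so
      // negative values round half away from zero, matching the scalar
      // ROUND_POWER_OF_TWO_SIGNED(diff, 12) from aom_dsp_common.h.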
      const __m256i v_sign_d = _mm256_srai_epi32(v_diff0_d, 31);
      const __m256i v_tmp_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d), v_sign_d);
      const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp_d, 12);
      const __m128i v_rdiff_d = _mm256_castsi256_si128(v_rdiff0_d);
      const __m128i v_rdiff1_d = _mm256_extracti128_si256(v_rdiff0_d, 1);

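      // The rounded differences fit in 16 bits, so pack the two halves to
      // words and square-accumulate pairs with a single pmaddwd.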
      const __m128i v_rdiff01_w = _mm_packs_epi32(v_rdiff_d, v_rdiff1_d);
      const __m128i v_sqrdiff_d = _mm_madd_epi16(v_rdiff01_w, v_rdiff01_w);

      v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff_d);
      v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff1_d);
      v_sse_d = _mm_add_epi32(v_sse_d, v_sqrdiff_d);

      pre_temp += 8;
      n += 8;
      width -= 8;
    } while (width > 0);
    pre += pre_stride;
    height -= 1;
  } while (height > 0);
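  // Horizontally reduce the accumulators; after the two hadds, lane 0 holds
  // the final sum and lane 1 the final sse.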
  v_d = _mm_hadd_epi32(v_sum_d, v_sse_d);
  v_d = _mm_hadd_epi32(v_d, v_d);
  *sum = _mm_cvtsi128_si32(v_d);
  *sse = _mm_cvtsi128_si32(_mm_srli_si128(v_d, 4));
}

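// As obmc_variance_w8n, but for widths that are a multiple of 16: each
// iteration loads sixteen pre pixels and processes them as two halves.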
static INLINE void obmc_variance_w16n(const uint8_t *pre, const int pre_stride,
                                      const int32_t *wsrc, const int32_t *mask,
                                      unsigned int *const sse, int *const sum,
                                      const int w, const int h) {
  int n = 0, width, height = h;
  __m256i v_d;
  __m128i res0;
  const uint8_t *pre_temp;
  const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
  __m256i v_sum_d = _mm256_setzero_si256();
  __m256i v_sse_d = _mm256_setzero_si256();

  assert(w >= 16);
  assert(IS_POWER_OF_TWO(w));
  assert(IS_POWER_OF_TWO(h));
  do {
    width = w;
    pre_temp = pre;
    do {
      const __m128i v_p_b = _mm_loadu_si128((const __m128i *)pre_temp);
      const __m256i v_m0_d = _mm256_loadu_si256((__m256i const *)(mask + n));
      const __m256i v_w0_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
      const __m256i v_m1_d =
          _mm256_loadu_si256((__m256i const *)(mask + n + 8));
      const __m256i v_w1_d =
          _mm256_loadu_si256((__m256i const *)(wsrc + n + 8));

      const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);
      const __m256i v_p1_d = _mm256_cvtepu8_epi32(_mm_srli_si128(v_p_b, 8));

      const __m256i v_pm0_d = _mm256_madd_epi16(v_p0_d, v_m0_d);
      const __m256i v_pm1_d = _mm256_madd_epi16(v_p1_d, v_m1_d);

      const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);
      const __m256i v_diff1_d = _mm256_sub_epi32(v_w1_d, v_pm1_d);

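      // Apply the same sign-corrected rounding to 12 fractional bits as in
      // obmc_variance_w8n, on both eight-pixel halves.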
      const __m256i v_sign0_d = _mm256_srai_epi32(v_diff0_d, 31);
      const __m256i v_sign1_d = _mm256_srai_epi32(v_diff1_d, 31);

      const __m256i v_tmp0_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d), v_sign0_d);
      const __m256i v_tmp1_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff1_d, v_bias_d), v_sign1_d);

      const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp0_d, 12);
      const __m256i v_rdiff2_d = _mm256_srai_epi32(v_tmp1_d, 12);

      const __m256i v_rdiff1_d = _mm256_add_epi32(v_rdiff0_d, v_rdiff2_d);
      const __m256i v_rdiff01_w = _mm256_packs_epi32(v_rdiff0_d, v_rdiff2_d);
      const __m256i v_sqrdiff_d = _mm256_madd_epi16(v_rdiff01_w, v_rdiff01_w);

      v_sum_d = _mm256_add_epi32(v_sum_d, v_rdiff1_d);
      v_sse_d = _mm256_add_epi32(v_sse_d, v_sqrdiff_d);

      pre_temp += 16;
      n += 16;
      width -= 16;
    } while (width > 0);
    pre += pre_stride;
    height -= 1;
  } while (height > 0);

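  // Reduce the 256-bit accumulators: each hadd interleaves partial sums of
  // v_sum_d and v_sse_d within 128-bit lanes, so the two lanes are added
  // before the two scalars are extracted.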
  v_d = _mm256_hadd_epi32(v_sum_d, v_sse_d);
  v_d = _mm256_hadd_epi32(v_d, v_d);
  res0 = _mm256_castsi256_si128(v_d);
  res0 = _mm_add_epi32(res0, _mm256_extractf128_si256(v_d, 1));
  *sum = _mm_cvtsi128_si32(res0);
  *sse = _mm_cvtsi128_si32(_mm_srli_si128(res0, 4));
}

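// Instantiates aom_obmc_variance<W>x<H>_avx2(), which computes the variance
// as the sum of squared differences minus the squared sum normalized by the
// block area.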
#define OBMCVARWXH(W, H)                                                \
  unsigned int aom_obmc_variance##W##x##H##_avx2(                       \
      const uint8_t *pre, int pre_stride, const int32_t *wsrc,          \
      const int32_t *mask, unsigned int *sse) {                         \
    int sum;                                                            \
    if (W == 4) {                                                       \
      obmc_variance_w4(pre, pre_stride, wsrc, mask, sse, &sum, H);      \
    } else if (W == 8) {                                                \
      obmc_variance_w8n(pre, pre_stride, wsrc, mask, sse, &sum, W, H);  \
    } else {                                                            \
      obmc_variance_w16n(pre, pre_stride, wsrc, mask, sse, &sum, W, H); \
    }                                                                   \
                                                                        \
    return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H));       \
  }

OBMCVARWXH(128, 128)
OBMCVARWXH(128, 64)
OBMCVARWXH(64, 128)
OBMCVARWXH(64, 64)
OBMCVARWXH(64, 32)
OBMCVARWXH(32, 64)
OBMCVARWXH(32, 32)
OBMCVARWXH(32, 16)
OBMCVARWXH(16, 32)
OBMCVARWXH(16, 16)
OBMCVARWXH(16, 8)
OBMCVARWXH(8, 16)
OBMCVARWXH(8, 8)
OBMCVARWXH(8, 4)
OBMCVARWXH(4, 8)
OBMCVARWXH(4, 4)
OBMCVARWXH(4, 16)
OBMCVARWXH(16, 4)
OBMCVARWXH(8, 32)
OBMCVARWXH(32, 8)
OBMCVARWXH(16, 64)
OBMCVARWXH(64, 16)
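
// A minimal usage sketch (hypothetical buffers; not part of the library):
//
//   DECLARE_ALIGNED(32, uint8_t, pre[32 * 32]);
//   DECLARE_ALIGNED(32, int32_t, wsrc[32 * 32]);
//   DECLARE_ALIGNED(32, int32_t, mask[32 * 32]);
//   /* ... fill the prediction, weighted-source, and mask buffers ... */
//   unsigned int sse;
//   const unsigned int var =
//       aom_obmc_variance32x32_avx2(pre, 32, wsrc, mask, &sse);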