/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at https://www.aomedia.org/license/software-license. If the
 * Alliance for Open Media Patent License 1.0 was not distributed with this
 * source code in the PATENTS file, you can obtain it at
 * https://www.aomedia.org/license/patent-license.
 */

#include <assert.h>
#include <immintrin.h>

#include "synonyms.h"
#include "aom_dsp_rtcd.h"

////////////////////////////////////////////////////////////////////////////////
// 8 bit
////////////////////////////////////////////////////////////////////////////////
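// Horizontal sum: add the four 32-bit lanes of v_d and return the scalar
// result.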
static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
    v_d = _mm_hadd_epi32(v_d, v_d);
    v_d = _mm_hadd_epi32(v_d, v_d);
    return _mm_cvtsi128_si32(v_d);
}

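// Accumulate the OBMC sum and sum of squares for 4-pixel-wide blocks.
// Matching the scalar reference, each difference is
// ROUND_POWER_OF_TWO_SIGNED(wsrc[i] - pre[i] * mask[i], 12), where mask holds
// per-pixel weights with 12 fractional bits (hence the shift by 12).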
static INLINE void obmc_variance_w4(const uint8_t *pre, const int pre_stride, const int32_t *wsrc,
                                    const int32_t *mask, unsigned int *const sse, int *const sum,
                                    const int h) {
    const int pre_step = pre_stride - 4;
    int n = 0;
    __m128i v_sum_d = _mm_setzero_si128();
    __m128i v_sse_d = _mm_setzero_si128();

    assert(IS_POWER_OF_TWO(h));

    do {
        const __m128i v_p_b = _mm_cvtsi32_si128(*(const uint32_t *)(pre + n));
        const __m128i v_m_d = _mm_loadu_si128((const __m128i *)(mask + n));
        const __m128i v_w_d = _mm_loadu_si128((const __m128i *)(wsrc + n));

        const __m128i v_p_d = _mm_cvtepu8_epi32(v_p_b);

        // Values in both pre and mask fit in 15 bits, and are packed at 32 bit
        // boundaries. We use pmaddwd, as it has lower latency on Haswell
        // than pmulld but produces the same result with these inputs.
        const __m128i v_pm_d = _mm_madd_epi16(v_p_d, v_m_d);

        const __m128i v_diff_d = _mm_sub_epi32(v_w_d, v_pm_d);
        const __m128i v_rdiff_d = xx_roundn_epi32(v_diff_d, 12);
        const __m128i v_sqrdiff_d = _mm_mullo_epi32(v_rdiff_d, v_rdiff_d);

        v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff_d);
        v_sse_d = _mm_add_epi32(v_sse_d, v_sqrdiff_d);

        n += 4;

        // Each iteration consumes one 4-pixel row; advance to the next row.
        pre += pre_step;
    } while (n < 4 * h);

    *sum = xx_hsum_epi32_si32(v_sum_d);
    *sse = xx_hsum_epi32_si32(v_sse_d);
}

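// Accumulate the OBMC sum and sum of squares for blocks at least 8 pixels
// wide, processing eight 32-bit wsrc/mask lanes against eight 8-bit pre
// pixels per step.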
static INLINE void obmc_variance_w8n(const uint8_t *pre, const int pre_stride, const int32_t *wsrc,
                                     const int32_t *mask, unsigned int *const sse, int *const sum,
                                     const int w, const int h) {
    int n = 0, height = h;
    __m128i v_sum_d = _mm_setzero_si128();
    __m128i v_sse_d = _mm_setzero_si128();
    const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
    __m128i v_d;
    assert(w >= 8);
    assert(IS_POWER_OF_TWO(w));
    assert(IS_POWER_OF_TWO(h));
    do {
        int width = w;
        const uint8_t *pre_temp = pre;
        do {
            const __m128i v_p_b = _mm_loadl_epi64((const __m128i *)pre_temp);
            const __m256i v_m_d = _mm256_loadu_si256((__m256i const *)(mask + n));
            const __m256i v_w_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
            const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);

            // Values in both pre and mask fit in 15 bits, and are packed at 32 bit
            // boundaries. We use pmaddwd, as it has lower latency on Haswell
            // than pmulld but produces the same result with these inputs.
            const __m256i v_pm_d = _mm256_madd_epi16(v_p0_d, v_m_d);
            const __m256i v_diff0_d = _mm256_sub_epi32(v_w_d, v_pm_d);

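            // Round the 32-bit differences to nearest after a 12-bit shift,
            // with ties away from zero: adding the sign word (0 or -1) before
            // the arithmetic shift matches ROUND_POWER_OF_TWO_SIGNED(diff, 12).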
            const __m256i v_sign_d = _mm256_srai_epi32(v_diff0_d, 31);
            const __m256i v_tmp_d = _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d),
                                                     v_sign_d);
            const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp_d, 12);
            const __m128i v_rdiff_d = _mm256_castsi256_si128(v_rdiff0_d);
            const __m128i v_rdiff1_d = _mm256_extracti128_si256(v_rdiff0_d, 1);

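            // The rounded differences fit comfortably in 16 bits, so the
            // signed pack is lossless and pmaddwd squares each value while
            // summing adjacent pairs in one step.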
            const __m128i v_rdiff01_w = _mm_packs_epi32(v_rdiff_d, v_rdiff1_d);
            const __m128i v_sqrdiff_d = _mm_madd_epi16(v_rdiff01_w, v_rdiff01_w);

            v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff_d);
            v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff1_d);
            v_sse_d = _mm_add_epi32(v_sse_d, v_sqrdiff_d);

            pre_temp += 8;
            n += 8;
            width -= 8;
        } while (width > 0);
        pre += pre_stride;
        height -= 1;
    } while (height > 0);
    v_d = _mm_hadd_epi32(v_sum_d, v_sse_d);
    v_d = _mm_hadd_epi32(v_d, v_d);
    *sum = _mm_cvtsi128_si32(v_d);
    *sse = _mm_cvtsi128_si32(_mm_srli_si128(v_d, 4));
}

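// Accumulate the OBMC sum and sum of squares for blocks at least 16 pixels
// wide, expanding each 16-byte load of pre into two 8-lane halves.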
static INLINE void obmc_variance_w16n(const uint8_t *pre, const int pre_stride, const int32_t *wsrc,
                                      const int32_t *mask, unsigned int *const sse, int *const sum,
                                      const int w, const int h) {
    int n = 0, height = h;
    __m256i v_d;
    __m128i res0;
    const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
    __m256i v_sum_d = _mm256_setzero_si256();
    __m256i v_sse_d = _mm256_setzero_si256();

    assert(w >= 16);
    assert(IS_POWER_OF_TWO(w));
    assert(IS_POWER_OF_TWO(h));
    do {
        int width = w;
        const uint8_t *pre_temp = pre;
        do {
            const __m128i v_p_b = _mm_loadu_si128((const __m128i *)pre_temp);
            const __m256i v_m0_d = _mm256_loadu_si256((__m256i const *)(mask + n));
            const __m256i v_w0_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
            const __m256i v_m1_d = _mm256_loadu_si256((__m256i const *)(mask + n + 8));
            const __m256i v_w1_d = _mm256_loadu_si256((__m256i const *)(wsrc + n + 8));

            const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);
            const __m256i v_p1_d = _mm256_cvtepu8_epi32(_mm_srli_si128(v_p_b, 8));

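            // As in obmc_variance_w8n, pre and mask fit in 15 bits, so
            // pmaddwd yields the same products as pmulld at lower latency.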
            const __m256i v_pm0_d = _mm256_madd_epi16(v_p0_d, v_m0_d);
            const __m256i v_pm1_d = _mm256_madd_epi16(v_p1_d, v_m1_d);

            const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);
            const __m256i v_diff1_d = _mm256_sub_epi32(v_w1_d, v_pm1_d);

            const __m256i v_sign0_d = _mm256_srai_epi32(v_diff0_d, 31);
            const __m256i v_sign1_d = _mm256_srai_epi32(v_diff1_d, 31);

            const __m256i v_tmp0_d = _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d),
                                                      v_sign0_d);
            const __m256i v_tmp1_d = _mm256_add_epi32(_mm256_add_epi32(v_diff1_d, v_bias_d),
                                                      v_sign1_d);

            const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp0_d, 12);
            const __m256i v_rdiff2_d = _mm256_srai_epi32(v_tmp1_d, 12);

            const __m256i v_rdiff1_d = _mm256_add_epi32(v_rdiff0_d, v_rdiff2_d);
            const __m256i v_rdiff01_w = _mm256_packs_epi32(v_rdiff0_d, v_rdiff2_d);
            const __m256i v_sqrdiff_d = _mm256_madd_epi16(v_rdiff01_w, v_rdiff01_w);

            v_sum_d = _mm256_add_epi32(v_sum_d, v_rdiff1_d);
            v_sse_d = _mm256_add_epi32(v_sse_d, v_sqrdiff_d);

            pre_temp += 16;
            n += 16;
            width -= 16;
        } while (width > 0);
        pre += pre_stride;
        height -= 1;
    } while (height > 0);

    v_d = _mm256_hadd_epi32(v_sum_d, v_sse_d);
    v_d = _mm256_hadd_epi32(v_d, v_d);
    res0 = _mm256_castsi256_si128(v_d);
    res0 = _mm_add_epi32(res0, _mm256_extractf128_si256(v_d, 1));
    *sum = _mm_cvtsi128_si32(res0);
    *sse = _mm_cvtsi128_si32(_mm_srli_si128(res0, 4));
}

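// The helpers above return sum = SUM(d) and sse = SUM(d * d) over the block;
// the variance follows as sse - sum^2 / (W * H), with the product widened to
// int64_t to avoid overflow.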
#define OBMCVARWXH(W, H)                                                              \
    unsigned int svt_aom_obmc_variance##W##x##H##_avx2(const uint8_t *pre,            \
                                                       int pre_stride,                \
                                                       const int32_t *wsrc,           \
                                                       const int32_t *mask,           \
                                                       unsigned int *sse) {           \
        int sum;                                                                      \
        if (W == 4) {                                                                 \
            obmc_variance_w4(pre, pre_stride, wsrc, mask, sse, &sum, H);              \
        } else if (W == 8) {                                                          \
            obmc_variance_w8n(pre, pre_stride, wsrc, mask, sse, &sum, W, H);          \
        } else {                                                                      \
            obmc_variance_w16n(pre, pre_stride, wsrc, mask, sse, &sum, W, H);         \
        }                                                                             \
                                                                                      \
        return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H));                 \
    }

OBMCVARWXH(128, 128)
OBMCVARWXH(128, 64)
OBMCVARWXH(64, 128)
OBMCVARWXH(64, 64)
OBMCVARWXH(64, 32)
OBMCVARWXH(32, 64)
OBMCVARWXH(32, 32)
OBMCVARWXH(32, 16)
OBMCVARWXH(16, 32)
OBMCVARWXH(16, 16)
OBMCVARWXH(16, 8)
OBMCVARWXH(8, 16)
OBMCVARWXH(8, 8)
OBMCVARWXH(8, 4)
OBMCVARWXH(4, 8)
OBMCVARWXH(4, 4)
OBMCVARWXH(4, 16)
OBMCVARWXH(16, 4)
OBMCVARWXH(8, 32)
OBMCVARWXH(32, 8)
OBMCVARWXH(16, 64)
OBMCVARWXH(64, 16)
223