/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>

#include "./vpx_config.h"
#include "./vp9_rtcd.h"

#include "vpx_ports/emmintrin_compat.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_denoiser.h"
#include "vpx_mem/vpx_mem.h"

// Compute the sum of all pixel differences of this MB.
static INLINE int sum_diff_16x1(__m128i acc_diff) {
  const __m128i k_1 = _mm_set1_epi16(1);
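  // Sign-extend each signed byte of acc_diff to 16 bits: unpacking the
  // register with itself copies each byte into the high half of a 16-bit
  // lane, and the arithmetic shift right by 8 brings it back down with
  // its sign propagated.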
  const __m128i acc_diff_lo =
      _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_hi =
      _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
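  // Multiplying by 1 with _mm_madd_epi16 adds adjacent 16-bit lanes into
  // four 32-bit partial sums; the two shift-and-add steps below then fold
  // those into a single scalar in the low lane.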
  const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
  const __m128i hgfe_dcba =
      _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
  const __m128i hgfedcba =
      _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
  return _mm_cvtsi128_si32(hgfedcba);
}

// Denoise a 16x1 vector.
static INLINE __m128i vp9_denoiser_16x1_sse2(
    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
    const __m128i *k_0, const __m128i *k_4, const __m128i *k_8,
    const __m128i *k_16, const __m128i *l3, const __m128i *l32,
    const __m128i *l21, __m128i acc_diff) {
  // Calculate differences
  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
  const __m128i v_mc_running_avg_y =
      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
  __m128i v_running_avg_y;
  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
  // Obtain the sign. FF if diff is negative.
  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0);
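  // pdiff and ndiff come from saturating unsigned subtraction, so at most
  // one of them is nonzero in each lane; OR-ing them below therefore
  // yields the absolute difference.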
  // Clamp absolute difference to 16 to be used to get mask. Doing this
  // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
  const __m128i clamped_absdiff =
      _mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16);
  // Get masks for l2 l1 and l0 adjustments.
  const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff);
  const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff);
  const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff);
  // Get adjustments for l2, l1, and l0.
  __m128i adj2 = _mm_and_si128(mask2, *l32);
  const __m128i adj1 = _mm_and_si128(mask1, *l21);
  const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
  __m128i adj, padj, nadj;

  // Combine the adjustments and get absolute adjustments.
  adj2 = _mm_add_epi8(adj2, adj1);
  adj = _mm_sub_epi8(*l3, adj2);
  adj = _mm_andnot_si128(mask0, adj);
  adj = _mm_or_si128(adj, adj0);
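  // Each lane of adj now holds the magnitude of the adjustment: |diff|
  // itself below k_4, l3 - 3 from k_4 up to k_8, l3 - 2 from k_8 up to
  // k_16, and l3 at 16 and above.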

  // Restore the sign and get positive and negative adjustments.
  padj = _mm_andnot_si128(diff_sign, adj);
  nadj = _mm_and_si128(diff_sign, adj);

  // Calculate filtered value.
  v_running_avg_y = _mm_adds_epu8(v_sig, padj);
  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

  // Adjustments <=7, and each element in acc_diff can fit in signed
  // char.
  acc_diff = _mm_adds_epi8(acc_diff, padj);
  acc_diff = _mm_subs_epi8(acc_diff, nadj);
  return acc_diff;
}

// Denoise a 16x1 vector with a weaker filter.
static INLINE __m128i vp9_denoiser_adj_16x1_sse2(
    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
    const __m128i k_0, const __m128i k_delta, __m128i acc_diff) {
  __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
  // Calculate differences.
  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
  const __m128i v_mc_running_avg_y =
      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
  // Obtain the sign. FF if diff is negative.
  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
  // Clamp absolute difference to delta to get the adjustment.
  const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
  // Restore the sign and get positive and negative adjustments.
  __m128i padj, nadj;
  padj = _mm_andnot_si128(diff_sign, adj);
  nadj = _mm_and_si128(diff_sign, adj);
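  // The signs are reversed relative to the strong filter: here the goal
  // is to pull running_avg_y back toward sig, shrinking the accumulated
  // sum of differences.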
  // Calculate filtered value.
  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
  v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

  // Accumulate the adjustments.
  acc_diff = _mm_subs_epi8(acc_diff, padj);
  acc_diff = _mm_adds_epi8(acc_diff, nadj);
  return acc_diff;
}

// Denoise 8x8 and 8x16 blocks.
static int vp9_denoiser_NxM_sse2_small(const uint8_t *sig, int sig_stride,
                                       const uint8_t *mc_running_avg_y,
                                       int mc_avg_y_stride,
                                       uint8_t *running_avg_y, int avg_y_stride,
                                       int increase_denoising, BLOCK_SIZE bs,
                                       int motion_magnitude, int width) {
  int sum_diff_thresh, r, sum_diff = 0;
  const int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
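  // The block is only 8 pixels wide, so each 16-byte buffer row packs two
  // consecutive image rows; the 16x1 kernel then filters two rows per
  // iteration, which is why b_height below is half the block height.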
  __m128i acc_diff = _mm_setzero_si128();
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  const int b_height = (4 << b_height_log2_lookup[bs]) >> 1;

  for (r = 0; r < b_height; ++r) {
    memcpy(sig_buffer[r], sig, width);
    memcpy(sig_buffer[r] + width, sig + sig_stride, width);
    memcpy(mc_running_buffer[r], mc_running_avg_y, width);
    memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride,
           width);
    memcpy(running_buffer[r], running_avg_y, width);
    memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
    acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], mc_running_buffer[r],
                                      running_buffer[r], &k_0, &k_4, &k_8,
                                      &k_16, &l3, &l32, &l21, acc_diff);
    memcpy(running_avg_y, running_buffer[r], width);
    memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
    // Update pointers for next iteration.
    sig += (sig_stride << 1);
    mc_running_avg_y += (mc_avg_y_stride << 1);
    running_avg_y += (avg_y_stride << 1);
  }

  {
    sum_diff = sum_diff_16x1(acc_diff);
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // check if we can still apply some (weaker) temporal filtering to
      // this block, which would otherwise not be denoised at all. The
      // simplest approach is to apply an additional adjustment to
      // running_avg_y to bring it closer to sig. The adjustment is capped
      // by a maximum delta, and chosen such that in most cases the
      // resulting sum_diff will be within the acceptable range given by
      // sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      const int delta =
          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const __m128i k_delta = _mm_set1_epi8(delta);
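        // Rewind running_avg_y to the top of the block. The loop above
        // advances two image rows per iteration, so the full block height
        // is b_height << 1.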
        running_avg_y -= avg_y_stride * (b_height << 1);
        for (r = 0; r < b_height; ++r) {
          acc_diff = vp9_denoiser_adj_16x1_sse2(
              sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_0,
              k_delta, acc_diff);
          memcpy(running_avg_y, running_buffer[r], width);
          memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width,
                 width);
          // Update pointers for next iteration.
          running_avg_y += (avg_y_stride << 1);
        }
        sum_diff = sum_diff_16x1(acc_diff);
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

// Denoise 16x8, 16x16, 16x32, 32x16, 32x32, 32x64, 64x32 and 64x64 blocks.
static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride,
                                     const uint8_t *mc_running_avg_y,
                                     int mc_avg_y_stride,
                                     uint8_t *running_avg_y, int avg_y_stride,
                                     int increase_denoising, BLOCK_SIZE bs,
                                     int motion_magnitude) {
  int sum_diff_thresh, r, c, sum_diff = 0;
  const int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  __m128i acc_diff[4][4];
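  // One accumulator per 16-pixel-wide column (up to four for a 64-wide
  // block) and per 16-row band (indexed by r >> 4). Each lane is a
  // saturating signed byte, so no lane ever accumulates more than 16 rows
  // of small adjustments before its band is summed into sum_diff.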
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  const int b_width = (4 << b_width_log2_lookup[bs]);
  const int b_height = (4 << b_height_log2_lookup[bs]);
  const int b_width_shift4 = b_width >> 4;

  for (r = 0; r < 4; ++r) {
    for (c = 0; c < b_width_shift4; ++c) {
      acc_diff[c][r] = _mm_setzero_si128();
    }
  }

  for (r = 0; r < b_height; ++r) {
    for (c = 0; c < b_width_shift4; ++c) {
      acc_diff[c][r >> 4] = vp9_denoiser_16x1_sse2(
          sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, &k_8, &k_16, &l3,
          &l32, &l21, acc_diff[c][r >> 4]);
      // Update pointers for next iteration.
      sig += 16;
      mc_running_avg_y += 16;
      running_avg_y += 16;
    }

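    // Sum the accumulators at the end of each 16-row band, or at the last
    // row of a 16x8 block, which is only 8 rows tall.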
    if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
      for (c = 0; c < b_width_shift4; ++c) {
        sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
      }
    }

    // Update pointers for next iteration.
    sig = sig - b_width + sig_stride;
    mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
    running_avg_y = running_avg_y - b_width + avg_y_stride;
  }

  {
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      const int delta =
          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
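      // As in the small-block path, try the weaker filter, whose per-pixel
      // adjustment is capped at delta, before giving up on denoising the
      // block.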

      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const __m128i k_delta = _mm_set1_epi8(delta);
        sig -= sig_stride * b_height;
        mc_running_avg_y -= mc_avg_y_stride * b_height;
        running_avg_y -= avg_y_stride * b_height;
        sum_diff = 0;
        for (r = 0; r < b_height; ++r) {
          for (c = 0; c < b_width_shift4; ++c) {
            acc_diff[c][r >> 4] =
                vp9_denoiser_adj_16x1_sse2(sig, mc_running_avg_y, running_avg_y,
                                           k_0, k_delta, acc_diff[c][r >> 4]);
            // Update pointers for next iteration.
            sig += 16;
            mc_running_avg_y += 16;
            running_avg_y += 16;
          }

          if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
            for (c = 0; c < b_width_shift4; ++c) {
              sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
            }
          }
          sig = sig - b_width + sig_stride;
          mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
          running_avg_y = running_avg_y - b_width + avg_y_stride;
        }
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride,
                             const uint8_t *mc_avg, int mc_avg_stride,
                             uint8_t *avg, int avg_stride,
                             int increase_denoising, BLOCK_SIZE bs,
                             int motion_magnitude) {
  // Block types are tested in rough order of frequency to allow early
  // termination.
  if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 ||
      bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 ||
      bs == BLOCK_32X64 || bs == BLOCK_64X32) {
    return vp9_denoiser_NxM_sse2_big(sig, sig_stride, mc_avg, mc_avg_stride,
                                     avg, avg_stride, increase_denoising, bs,
                                     motion_magnitude);
  } else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) {
    return vp9_denoiser_NxM_sse2_small(sig, sig_stride, mc_avg, mc_avg_stride,
                                       avg, avg_stride, increase_denoising, bs,
                                       motion_magnitude, 8);
  } else {
    return COPY_BLOCK;
  }
}