/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <math.h>
#include <limits.h>

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"

static int fixed_divide[512];
static unsigned int index_mult[14] = { 0,     0,     0,     0,     49152,
                                       39322, 32768, 28087, 24576, 21846,
                                       19661, 17874, 0,     15124 };
#if CONFIG_VP9_HIGHBITDEPTH
static int64_t highbd_index_mult[14] = { 0U,          0U,          0U,
                                         0U,          3221225472U, 2576980378U,
                                         2147483648U, 1840700270U, 1610612736U,
                                         1431655766U, 1288490189U, 1171354718U,
                                         0U,          991146300U };
#endif  // CONFIG_VP9_HIGHBITDEPTH
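// Note: both tables encode the constant 3 / index in fixed point:
// index_mult[i] is approximately (3 << 16) / i and highbd_index_mult[i]
// approximately (3 << 32) / i, so mod_index() and highbd_mod_index() below
// can average a sum of squared differences over `index` samples (scaled by
// 3) with a multiply and shift instead of a division. Entries that are never
// used as an index are left at 0 and guarded by asserts.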

static void temporal_filter_predictors_mb_c(
    MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
    int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
    uint8_t *pred, struct scale_factors *scale, int x, int y, MV *blk_mvs,
    int use_32x32) {
  const int which_mv = 0;
  const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP_SHARP];
  int i, j, k = 0, ys = (BH >> 1), xs = (BW >> 1);

  enum mv_precision mv_precision_uv;
  int uv_stride;
  if (uv_block_width == (BW >> 1)) {
    uv_stride = (stride + 1) >> 1;
    mv_precision_uv = MV_PRECISION_Q4;
  } else {
    uv_stride = stride;
    mv_precision_uv = MV_PRECISION_Q3;
  }
#if !CONFIG_VP9_HIGHBITDEPTH
  (void)xd;
#endif

  if (use_32x32) {
    const MV mv = { mv_row, mv_col };
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor(CONVERT_TO_SHORTPTR(y_mb_ptr), stride,
                                       CONVERT_TO_SHORTPTR(&pred[0]), BW, &mv,
                                       scale, BW, BH, which_mv, kernel,
                                       MV_PRECISION_Q3, x, y, xd->bd);

      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(u_mb_ptr), uv_stride,
          CONVERT_TO_SHORTPTR(&pred[BLK_PELS]), uv_block_width, &mv, scale,
          uv_block_width, uv_block_height, which_mv, kernel, mv_precision_uv, x,
          y, xd->bd);

      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(v_mb_ptr), uv_stride,
          CONVERT_TO_SHORTPTR(&pred[(BLK_PELS << 1)]), uv_block_width, &mv,
          scale, uv_block_width, uv_block_height, which_mv, kernel,
          mv_precision_uv, x, y, xd->bd);
      return;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    vp9_build_inter_predictor(y_mb_ptr, stride, &pred[0], BW, &mv, scale, BW,
                              BH, which_mv, kernel, MV_PRECISION_Q3, x, y);

    vp9_build_inter_predictor(u_mb_ptr, uv_stride, &pred[BLK_PELS],
                              uv_block_width, &mv, scale, uv_block_width,
                              uv_block_height, which_mv, kernel,
                              mv_precision_uv, x, y);

    vp9_build_inter_predictor(v_mb_ptr, uv_stride, &pred[(BLK_PELS << 1)],
                              uv_block_width, &mv, scale, uv_block_width,
                              uv_block_height, which_mv, kernel,
                              mv_precision_uv, x, y);
    return;
  }

  // When use_32x32 is 0, construct the 32x32 predictor from four 16x16
  // predictors.
  // Y predictor
  for (i = 0; i < BH; i += ys) {
    for (j = 0; j < BW; j += xs) {
      const MV mv = blk_mvs[k];
      const int y_offset = i * stride + j;
      const int p_offset = i * BW + j;

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_build_inter_predictor(
            CONVERT_TO_SHORTPTR(y_mb_ptr + y_offset), stride,
            CONVERT_TO_SHORTPTR(&pred[p_offset]), BW, &mv, scale, xs, ys,
            which_mv, kernel, MV_PRECISION_Q3, x, y, xd->bd);
      } else {
        vp9_build_inter_predictor(y_mb_ptr + y_offset, stride, &pred[p_offset],
                                  BW, &mv, scale, xs, ys, which_mv, kernel,
                                  MV_PRECISION_Q3, x, y);
      }
#else
      vp9_build_inter_predictor(y_mb_ptr + y_offset, stride, &pred[p_offset],
                                BW, &mv, scale, xs, ys, which_mv, kernel,
                                MV_PRECISION_Q3, x, y);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      k++;
    }
  }

  // U and V predictors
  ys = (uv_block_height >> 1);
  xs = (uv_block_width >> 1);
  k = 0;

  for (i = 0; i < uv_block_height; i += ys) {
    for (j = 0; j < uv_block_width; j += xs) {
      const MV mv = blk_mvs[k];
      const int uv_offset = i * uv_stride + j;
      const int p_offset = i * uv_block_width + j;

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_build_inter_predictor(
            CONVERT_TO_SHORTPTR(u_mb_ptr + uv_offset), uv_stride,
            CONVERT_TO_SHORTPTR(&pred[BLK_PELS + p_offset]), uv_block_width,
            &mv, scale, xs, ys, which_mv, kernel, mv_precision_uv, x, y,
            xd->bd);

        vp9_highbd_build_inter_predictor(
            CONVERT_TO_SHORTPTR(v_mb_ptr + uv_offset), uv_stride,
            CONVERT_TO_SHORTPTR(&pred[(BLK_PELS << 1) + p_offset]),
            uv_block_width, &mv, scale, xs, ys, which_mv, kernel,
            mv_precision_uv, x, y, xd->bd);
      } else {
        vp9_build_inter_predictor(u_mb_ptr + uv_offset, uv_stride,
                                  &pred[BLK_PELS + p_offset], uv_block_width,
                                  &mv, scale, xs, ys, which_mv, kernel,
                                  mv_precision_uv, x, y);

        vp9_build_inter_predictor(v_mb_ptr + uv_offset, uv_stride,
                                  &pred[(BLK_PELS << 1) + p_offset],
                                  uv_block_width, &mv, scale, xs, ys, which_mv,
                                  kernel, mv_precision_uv, x, y);
      }
#else
      vp9_build_inter_predictor(u_mb_ptr + uv_offset, uv_stride,
                                &pred[BLK_PELS + p_offset], uv_block_width, &mv,
                                scale, xs, ys, which_mv, kernel,
                                mv_precision_uv, x, y);

      vp9_build_inter_predictor(v_mb_ptr + uv_offset, uv_stride,
                                &pred[(BLK_PELS << 1) + p_offset],
                                uv_block_width, &mv, scale, xs, ys, which_mv,
                                kernel, mv_precision_uv, x, y);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      k++;
    }
  }
}

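// Fills fixed_divide[i] with the Q19 reciprocal (1 << 19) / i. The
// normalization loops in vp9_temporal_filter_iterate_row_c() use this table
// to compute the rounded per-pixel division accumulator / count with a
// multiply and a shift instead of an integer divide.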
void vp9_temporal_filter_init(void) {
  int i;

  fixed_divide[0] = 0;
  for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i;
}

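// Maps a summed squared difference over `index` samples to a filter weight
// in [0, 16 * filter_weight]: the larger the mismatch between prediction and
// source, the smaller the weight this frame's pixel receives. The multiply
// by index_mult[index] and the shift approximate (sum_dist * 3) / index.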
static INLINE int mod_index(int sum_dist, int index, int rounding, int strength,
                            int filter_weight) {
  int mod;

  assert(index >= 0 && index <= 13);
  assert(index_mult[index] != 0);

  mod =
      ((unsigned int)clamp(sum_dist, 0, UINT16_MAX) * index_mult[index]) >> 16;
  mod += rounding;
  mod >>= strength;

  mod = VPXMIN(16, mod);

  mod = 16 - mod;
  mod *= filter_weight;

  return mod;
}

#if CONFIG_VP9_HIGHBITDEPTH
static INLINE int highbd_mod_index(int sum_dist, int index, int rounding,
                                   int strength, int filter_weight) {
  int mod;

  assert(index >= 0 && index <= 13);
  assert(highbd_index_mult[index] != 0);

  mod = (int)((clamp(sum_dist, 0, INT32_MAX) * highbd_index_mult[index]) >> 32);
  mod += rounding;
  mod >>= strength;

  mod = VPXMIN(16, mod);

  mod = 16 - mod;
  mod *= filter_weight;

  return mod;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

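// Returns the filter weight for pixel (i, j). When the 32x32 block is split,
// blk_fw[0..3] hold the weights of the top-left, top-right, bottom-left and
// bottom-right 16x16 sub-blocks respectively.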
static INLINE int get_filter_weight(unsigned int i, unsigned int j,
                                    unsigned int block_height,
                                    unsigned int block_width,
                                    const int *const blk_fw, int use_32x32) {
  // blk_fw[0] ~ blk_fw[3] are the same.
  if (use_32x32) {
    return blk_fw[0];
  }

  if (i < block_height / 2) {
    if (j < block_width / 2) {
      return blk_fw[0];
    }

    return blk_fw[1];
  }

  if (j < block_width / 2) {
    return blk_fw[2];
  }

  return blk_fw[3];
}

void vp9_apply_temporal_filter_c(
    const uint8_t *y_frame1, int y_stride, const uint8_t *y_pred,
    int y_buf_stride, const uint8_t *u_frame1, const uint8_t *v_frame1,
    int uv_stride, const uint8_t *u_pred, const uint8_t *v_pred,
    int uv_buf_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *const blk_fw, int use_32x32,
    uint32_t *y_accumulator, uint16_t *y_count, uint32_t *u_accumulator,
    uint16_t *u_count, uint32_t *v_accumulator, uint16_t *v_count) {
  unsigned int i, j, k, m;
  int modifier;
  const int rounding = (1 << strength) >> 1;
  const unsigned int uv_block_width = block_width >> ss_x;
  const unsigned int uv_block_height = block_height >> ss_y;
  DECLARE_ALIGNED(16, uint16_t, y_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint16_t, u_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint16_t, v_diff_sse[BLK_PELS]);

  int idx = 0, idy;

  assert(strength >= 0);
  assert(strength <= 6);

  memset(y_diff_sse, 0, BLK_PELS * sizeof(uint16_t));
  memset(u_diff_sse, 0, BLK_PELS * sizeof(uint16_t));
  memset(v_diff_sse, 0, BLK_PELS * sizeof(uint16_t));

  // Calculate diff^2 for each pixel of the block.
  // TODO(yunqing): the following code needs to be optimized.
  for (i = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++) {
      const int16_t diff =
          y_frame1[i * (int)y_stride + j] - y_pred[i * (int)block_width + j];
      y_diff_sse[idx++] = diff * diff;
    }
  }
  idx = 0;
  for (i = 0; i < uv_block_height; i++) {
    for (j = 0; j < uv_block_width; j++) {
      const int16_t diffu =
          u_frame1[i * uv_stride + j] - u_pred[i * uv_buf_stride + j];
      const int16_t diffv =
          v_frame1[i * uv_stride + j] - v_pred[i * uv_buf_stride + j];
      u_diff_sse[idx] = diffu * diffu;
      v_diff_sse[idx] = diffv * diffv;
      idx++;
    }
  }

  for (i = 0, k = 0, m = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++) {
      const int pixel_value = y_pred[i * y_buf_stride + j];
      const int filter_weight =
          get_filter_weight(i, j, block_height, block_width, blk_fw, use_32x32);

      // Non-local mean approach: derive the weight from the average squared
      // difference over the 3x3 neighborhood plus the co-located chroma
      // samples.
      int y_index = 0;

      const int uv_r = i >> ss_y;
      const int uv_c = j >> ss_x;
      modifier = 0;

      for (idy = -1; idy <= 1; ++idy) {
        for (idx = -1; idx <= 1; ++idx) {
          const int row = (int)i + idy;
          const int col = (int)j + idx;

          if (row >= 0 && row < (int)block_height && col >= 0 &&
              col < (int)block_width) {
            modifier += y_diff_sse[row * (int)block_width + col];
            ++y_index;
          }
        }
      }

      assert(y_index > 0);

      modifier += u_diff_sse[uv_r * uv_block_width + uv_c];
      modifier += v_diff_sse[uv_r * uv_block_width + uv_c];

      y_index += 2;

      modifier =
          mod_index(modifier, y_index, rounding, strength, filter_weight);

      y_count[k] += modifier;
      y_accumulator[k] += modifier * pixel_value;

      ++k;

      // Process chroma component
      if (!(i & ss_y) && !(j & ss_x)) {
        const int u_pixel_value = u_pred[uv_r * uv_buf_stride + uv_c];
        const int v_pixel_value = v_pred[uv_r * uv_buf_stride + uv_c];

        // non-local mean approach
        int cr_index = 0;
        int u_mod = 0, v_mod = 0;
        int y_diff = 0;

        for (idy = -1; idy <= 1; ++idy) {
          for (idx = -1; idx <= 1; ++idx) {
            const int row = uv_r + idy;
            const int col = uv_c + idx;

            if (row >= 0 && row < (int)uv_block_height && col >= 0 &&
                col < (int)uv_block_width) {
              u_mod += u_diff_sse[row * uv_block_width + col];
              v_mod += v_diff_sse[row * uv_block_width + col];
              ++cr_index;
            }
          }
        }

        assert(cr_index > 0);

        for (idy = 0; idy < 1 + ss_y; ++idy) {
          for (idx = 0; idx < 1 + ss_x; ++idx) {
            const int row = (uv_r << ss_y) + idy;
            const int col = (uv_c << ss_x) + idx;
            y_diff += y_diff_sse[row * (int)block_width + col];
            ++cr_index;
          }
        }

        u_mod += y_diff;
        v_mod += y_diff;

        u_mod = mod_index(u_mod, cr_index, rounding, strength, filter_weight);
        v_mod = mod_index(v_mod, cr_index, rounding, strength, filter_weight);

        u_count[m] += u_mod;
        u_accumulator[m] += u_mod * u_pixel_value;
        v_count[m] += v_mod;
        v_accumulator[m] += v_mod * v_pixel_value;

        ++m;
      }  // Complete YUV pixel
    }
  }
}
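// The filter above only accumulates weighted pixel values and weights; the
// per-pixel division that produces the final filtered frame happens later,
// in the normalization loops of vp9_temporal_filter_iterate_row_c(), once
// every frame in the filter group has been accumulated.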

#if CONFIG_VP9_HIGHBITDEPTH
void vp9_highbd_apply_temporal_filter_c(
    const uint16_t *y_src, int y_src_stride, const uint16_t *y_pre,
    int y_pre_stride, const uint16_t *u_src, const uint16_t *v_src,
    int uv_src_stride, const uint16_t *u_pre, const uint16_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *const blk_fw, int use_32x32,
    uint32_t *y_accum, uint16_t *y_count, uint32_t *u_accum, uint16_t *u_count,
    uint32_t *v_accum, uint16_t *v_count) {
  const int uv_block_width = block_width >> ss_x;
  const int uv_block_height = block_height >> ss_y;
  const int y_diff_stride = BW;
  const int uv_diff_stride = BW;

  DECLARE_ALIGNED(16, uint32_t, y_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint32_t, u_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint32_t, v_diff_sse[BLK_PELS]);

  const int rounding = (1 << strength) >> 1;

  // Loop variables
  int row, col;
  int uv_row, uv_col;
  int row_step, col_step;

  memset(y_diff_sse, 0, BLK_PELS * sizeof(uint32_t));
  memset(u_diff_sse, 0, BLK_PELS * sizeof(uint32_t));
  memset(v_diff_sse, 0, BLK_PELS * sizeof(uint32_t));

  // Get the square diffs
  for (row = 0; row < (int)block_height; row++) {
    for (col = 0; col < (int)block_width; col++) {
      const int diff =
          y_src[row * y_src_stride + col] - y_pre[row * y_pre_stride + col];
      y_diff_sse[row * y_diff_stride + col] = diff * diff;
    }
  }

  for (row = 0; row < uv_block_height; row++) {
    for (col = 0; col < uv_block_width; col++) {
      const int u_diff =
          u_src[row * uv_src_stride + col] - u_pre[row * uv_pre_stride + col];
      const int v_diff =
          v_src[row * uv_src_stride + col] - v_pre[row * uv_pre_stride + col];
      u_diff_sse[row * uv_diff_stride + col] = u_diff * u_diff;
      v_diff_sse[row * uv_diff_stride + col] = v_diff * v_diff;
    }
  }

  // Apply the filter to luma
  for (row = 0; row < (int)block_height; row++) {
    for (col = 0; col < (int)block_width; col++) {
      const int uv_row = row >> ss_y;
      const int uv_col = col >> ss_x;
      const int filter_weight = get_filter_weight(
          row, col, block_height, block_width, blk_fw, use_32x32);

      // First we get the modifier for the current y pixel
      const int y_pixel = y_pre[row * y_pre_stride + col];
      int y_num_used = 0;
      int y_mod = 0;

      // Sum the neighboring 3x3 y pixels
      for (row_step = -1; row_step <= 1; row_step++) {
        for (col_step = -1; col_step <= 1; col_step++) {
          const int sub_row = row + row_step;
          const int sub_col = col + col_step;

          if (sub_row >= 0 && sub_row < (int)block_height && sub_col >= 0 &&
              sub_col < (int)block_width) {
            y_mod += y_diff_sse[sub_row * y_diff_stride + sub_col];
            y_num_used++;
          }
        }
      }

      // Sum the corresponding uv pixels to the current y modifier
      // Note we are rounding down instead of rounding to the nearest pixel.
      y_mod += u_diff_sse[uv_row * uv_diff_stride + uv_col];
      y_mod += v_diff_sse[uv_row * uv_diff_stride + uv_col];

      y_num_used += 2;

      // Set the modifier
      y_mod = highbd_mod_index(y_mod, y_num_used, rounding, strength,
                               filter_weight);

      // Accumulate the result
      y_count[row * block_width + col] += y_mod;
      y_accum[row * block_width + col] += y_mod * y_pixel;
    }
  }

  // Apply the filter to chroma
  for (uv_row = 0; uv_row < uv_block_height; uv_row++) {
    for (uv_col = 0; uv_col < uv_block_width; uv_col++) {
      const int y_row = uv_row << ss_y;
      const int y_col = uv_col << ss_x;
      const int filter_weight = get_filter_weight(
          uv_row, uv_col, uv_block_height, uv_block_width, blk_fw, use_32x32);

      const int u_pixel = u_pre[uv_row * uv_pre_stride + uv_col];
      const int v_pixel = v_pre[uv_row * uv_pre_stride + uv_col];

      int uv_num_used = 0;
      int u_mod = 0, v_mod = 0;

      // Sum the neighboring 3x3 chroma pixels to the chroma modifier
      for (row_step = -1; row_step <= 1; row_step++) {
        for (col_step = -1; col_step <= 1; col_step++) {
          const int sub_row = uv_row + row_step;
          const int sub_col = uv_col + col_step;

          if (sub_row >= 0 && sub_row < uv_block_height && sub_col >= 0 &&
              sub_col < uv_block_width) {
            u_mod += u_diff_sse[sub_row * uv_diff_stride + sub_col];
            v_mod += v_diff_sse[sub_row * uv_diff_stride + sub_col];
            uv_num_used++;
          }
        }
      }

      // Sum all the luma pixels associated with the current chroma pixel
      for (row_step = 0; row_step < 1 + ss_y; row_step++) {
        for (col_step = 0; col_step < 1 + ss_x; col_step++) {
          const int sub_row = y_row + row_step;
          const int sub_col = y_col + col_step;
          const int y_diff = y_diff_sse[sub_row * y_diff_stride + sub_col];

          u_mod += y_diff;
          v_mod += y_diff;
          uv_num_used++;
        }
      }

      // Set the modifier
      u_mod = highbd_mod_index(u_mod, uv_num_used, rounding, strength,
                               filter_weight);
      v_mod = highbd_mod_index(v_mod, uv_num_used, rounding, strength,
                               filter_weight);

      // Accumulate the result
      u_count[uv_row * uv_block_width + uv_col] += u_mod;
      u_accum[uv_row * uv_block_width + uv_col] += u_mod * u_pixel;
      v_count[uv_row * uv_block_width + uv_col] += v_mod;
      v_accum[uv_row * uv_block_width + uv_col] += v_mod * v_pixel;
    }
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static uint32_t temporal_filter_find_matching_mb_c(
    VP9_COMP *cpi, ThreadData *td, uint8_t *arf_frame_buf,
    uint8_t *frame_ptr_buf, int stride, MV *ref_mv, MV *blk_mvs,
    int *blk_bestsme) {
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const SEARCH_METHODS search_method = MESH;
  const SEARCH_METHODS search_method_16 = cpi->sf.temporal_filter_search_method;
  int step_param;
  int sadpb = x->sadperbit16;
  uint32_t bestsme = UINT_MAX;
  uint32_t distortion;
  uint32_t sse;
  int cost_list[5];
  const MvLimits tmp_mv_limits = x->mv_limits;

  MV best_ref_mv1 = { 0, 0 };
  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */

  // Save input state
  struct buf_2d src = x->plane[0].src;
  struct buf_2d pre = xd->plane[0].pre[0];
  int i, j, k = 0;

  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;

  // Setup frame pointers
  x->plane[0].src.buf = arf_frame_buf;
  x->plane[0].src.stride = stride;
  xd->plane[0].pre[0].buf = frame_ptr_buf;
  xd->plane[0].pre[0].stride = stride;

  step_param = mv_sf->reduce_first_step_size;
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);

  vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);

  vp9_full_pixel_search(cpi, x, TF_BLOCK, &best_ref_mv1_full, step_param,
                        search_method, sadpb, cond_cost_list(cpi, cost_list),
                        &best_ref_mv1, ref_mv, 0, 0);

  /* restore UMV window */
  x->mv_limits = tmp_mv_limits;

  // find_fractional_mv_step parameters: best_ref_mv1 is for mv rate cost
  // calculation. The start full mv and the search result are stored in
  // ref_mv.
  bestsme = cpi->find_fractional_mv_step(
      x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
      x->errorperbit, &cpi->fn_ptr[TF_BLOCK], 0, mv_sf->subpel_search_level,
      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, BW,
      BH, USE_8_TAPS_SHARP);

  // Do motion search on the four 16x16 sub-blocks.
  best_ref_mv1.row = ref_mv->row;
  best_ref_mv1.col = ref_mv->col;
  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;

  for (i = 0; i < BH; i += SUB_BH) {
    for (j = 0; j < BW; j += SUB_BW) {
      // Setup frame pointers
      x->plane[0].src.buf = arf_frame_buf + i * stride + j;
      x->plane[0].src.stride = stride;
      xd->plane[0].pre[0].buf = frame_ptr_buf + i * stride + j;
      xd->plane[0].pre[0].stride = stride;

      vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);
      vp9_full_pixel_search(cpi, x, TF_SUB_BLOCK, &best_ref_mv1_full,
                            step_param, search_method_16, sadpb,
                            cond_cost_list(cpi, cost_list), &best_ref_mv1,
                            &blk_mvs[k], 0, 0);
      /* restore UMV window */
      x->mv_limits = tmp_mv_limits;

      blk_bestsme[k] = cpi->find_fractional_mv_step(
          x, &blk_mvs[k], &best_ref_mv1, cpi->common.allow_high_precision_mv,
          x->errorperbit, &cpi->fn_ptr[TF_SUB_BLOCK], 0,
          mv_sf->subpel_search_level, cond_cost_list(cpi, cost_list), NULL,
          NULL, &distortion, &sse, NULL, SUB_BW, SUB_BH, USE_8_TAPS_SHARP);
      k++;
    }
  }

  // Restore input state
  x->plane[0].src = src;
  xd->plane[0].pre[0] = pre;

  return bestsme;
}

void vp9_temporal_filter_iterate_row_c(VP9_COMP *cpi, ThreadData *td,
                                       int mb_row, int mb_col_start,
                                       int mb_col_end) {
  ARNRFilterData *arnr_filter_data = &cpi->arnr_filter_data;
  YV12_BUFFER_CONFIG **frames = arnr_filter_data->frames;
  int frame_count = arnr_filter_data->frame_count;
  int alt_ref_index = arnr_filter_data->alt_ref_index;
  int strength = arnr_filter_data->strength;
  struct scale_factors *scale = &arnr_filter_data->sf;
  int byte;
  int frame;
  int mb_col;
  int mb_cols = (frames[alt_ref_index]->y_crop_width + BW - 1) >> BW_LOG2;
  int mb_rows = (frames[alt_ref_index]->y_crop_height + BH - 1) >> BH_LOG2;
  DECLARE_ALIGNED(16, uint32_t, accumulator[BLK_PELS * 3]);
  DECLARE_ALIGNED(16, uint16_t, count[BLK_PELS * 3]);
  MACROBLOCKD *mbd = &td->mb.e_mbd;
  YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
  uint8_t *dst1, *dst2;
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, predictor16[BLK_PELS * 3]);
  DECLARE_ALIGNED(16, uint8_t, predictor8[BLK_PELS * 3]);
  uint8_t *predictor;
#else
  DECLARE_ALIGNED(16, uint8_t, predictor[BLK_PELS * 3]);
#endif
  const int mb_uv_height = BH >> mbd->plane[1].subsampling_y;
  const int mb_uv_width = BW >> mbd->plane[1].subsampling_x;
  // Addition of the tile col level offsets
  int mb_y_offset = mb_row * BH * (f->y_stride) + BW * mb_col_start;
  int mb_uv_offset =
      mb_row * mb_uv_height * f->uv_stride + mb_uv_width * mb_col_start;

#if CONFIG_VP9_HIGHBITDEPTH
  if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    predictor = CONVERT_TO_BYTEPTR(predictor16);
  } else {
    predictor = predictor8;
  }
#endif
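  // The predictor buffer packs all three planes back to back: Y occupies
  // [0, BLK_PELS), U [BLK_PELS, 2 * BLK_PELS), and V starts at 2 * BLK_PELS.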

  // Source frames are extended to 16 pixels. This is different from the
  //  L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS).
  // A 6/8 tap filter is used for motion search.  This requires 2 pixels
  //  before and 3 pixels after.  So the largest Y mv on a border would
  //  then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the
  //  Y and therefore only extended by 8.  The largest mv that a UV block
  //  can support is 8 - VP9_INTERP_EXTEND.  A UV mv is half of a Y mv:
  //  (16 - VP9_INTERP_EXTEND) >> 1, which is greater than
  //  8 - VP9_INTERP_EXTEND.
  // To keep the mv in play for both Y and UV planes, the max that it
  //  can be on a border is therefore 16 - (2 * VP9_INTERP_EXTEND + 1).
  td->mb.mv_limits.row_min = -((mb_row * BH) + (17 - 2 * VP9_INTERP_EXTEND));
  td->mb.mv_limits.row_max =
      ((mb_rows - 1 - mb_row) * BH) + (17 - 2 * VP9_INTERP_EXTEND);

  for (mb_col = mb_col_start; mb_col < mb_col_end; mb_col++) {
    int i, j, k;
    int stride;
    MV ref_mv;

    vp9_zero_array(accumulator, BLK_PELS * 3);
    vp9_zero_array(count, BLK_PELS * 3);

    td->mb.mv_limits.col_min = -((mb_col * BW) + (17 - 2 * VP9_INTERP_EXTEND));
    td->mb.mv_limits.col_max =
        ((mb_cols - 1 - mb_col) * BW) + (17 - 2 * VP9_INTERP_EXTEND);

    if (cpi->oxcf.content == VP9E_CONTENT_FILM) {
      unsigned int src_variance;
      struct buf_2d src;

      src.buf = f->y_buffer + mb_y_offset;
      src.stride = f->y_stride;

#if CONFIG_VP9_HIGHBITDEPTH
      if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        src_variance =
            vp9_high_get_sby_perpixel_variance(cpi, &src, TF_BLOCK, mbd->bd);
      } else {
        src_variance = vp9_get_sby_perpixel_variance(cpi, &src, TF_BLOCK);
      }
#else
      src_variance = vp9_get_sby_perpixel_variance(cpi, &src, TF_BLOCK);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      if (src_variance <= 2) {
        strength = VPXMAX(0, arnr_filter_data->strength - 2);
      }
    }

    for (frame = 0; frame < frame_count; frame++) {
      // MVs for 4 16x16 sub blocks.
      MV blk_mvs[4];
      // Filter weights for 4 16x16 sub blocks.
      int blk_fw[4] = { 0, 0, 0, 0 };
      int use_32x32 = 0;

      if (frames[frame] == NULL) continue;

      ref_mv.row = 0;
      ref_mv.col = 0;
      blk_mvs[0] = kZeroMv;
      blk_mvs[1] = kZeroMv;
      blk_mvs[2] = kZeroMv;
      blk_mvs[3] = kZeroMv;

      if (frame == alt_ref_index) {
        blk_fw[0] = blk_fw[1] = blk_fw[2] = blk_fw[3] = 2;
        use_32x32 = 1;
      } else {
        const int thresh_low = 10000;
        const int thresh_high = 20000;
        int blk_bestsme[4] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX };

        // Find best match in this frame by MC
        int err = temporal_filter_find_matching_mb_c(
            cpi, td, frames[alt_ref_index]->y_buffer + mb_y_offset,
            frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride,
            &ref_mv, blk_mvs, blk_bestsme);

        int err16 =
            blk_bestsme[0] + blk_bestsme[1] + blk_bestsme[2] + blk_bestsme[3];
        int max_err = INT_MIN, min_err = INT_MAX;
        for (k = 0; k < 4; k++) {
          if (min_err > blk_bestsme[k]) min_err = blk_bestsme[k];
          if (max_err < blk_bestsme[k]) max_err = blk_bestsme[k];
        }

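        // Prefer the single 32x32 motion when its error is within roughly
        // 16/15 of the summed 16x16 errors (or within 16/14 when the four
        // sub-block errors are very similar), i.e. when splitting buys
        // little.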
        if (((err * 15 < (err16 << 4)) && max_err - min_err < 10000) ||
            ((err * 14 < (err16 << 4)) && max_err - min_err < 5000)) {
          use_32x32 = 1;
          // Assign a higher weight to the matching MB if its error score is
          // lower. If not applying MC, the default behavior is to weight all
          // MBs equally.
          blk_fw[0] = err < (thresh_low << THR_SHIFT)
                          ? 2
                          : err < (thresh_high << THR_SHIFT) ? 1 : 0;
          blk_fw[1] = blk_fw[2] = blk_fw[3] = blk_fw[0];
        } else {
          use_32x32 = 0;
          for (k = 0; k < 4; k++)
            blk_fw[k] = blk_bestsme[k] < thresh_low
                            ? 2
                            : blk_bestsme[k] < thresh_high ? 1 : 0;
        }

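        // Scale the weights back for frames further away from the ARF:
        // immediate neighbours may keep full weight, while frames two or
        // three positions away are capped at a weight of 1.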
        for (k = 0; k < 4; k++) {
          switch (abs(frame - alt_ref_index)) {
            case 1: blk_fw[k] = VPXMIN(blk_fw[k], 2); break;
            case 2:
            case 3: blk_fw[k] = VPXMIN(blk_fw[k], 1); break;
            default: break;
          }
        }
      }

      if (blk_fw[0] | blk_fw[1] | blk_fw[2] | blk_fw[3]) {
        // Construct the predictors
        temporal_filter_predictors_mb_c(
            mbd, frames[frame]->y_buffer + mb_y_offset,
            frames[frame]->u_buffer + mb_uv_offset,
            frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
            mb_uv_width, mb_uv_height, ref_mv.row, ref_mv.col, predictor, scale,
            mb_col * BW, mb_row * BH, blk_mvs, use_32x32);

#if CONFIG_VP9_HIGHBITDEPTH
        if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          int adj_strength = strength + 2 * (mbd->bd - 8);
          // Apply the filter (YUV)
          vp9_highbd_apply_temporal_filter(
              CONVERT_TO_SHORTPTR(f->y_buffer + mb_y_offset), f->y_stride,
              CONVERT_TO_SHORTPTR(predictor), BW,
              CONVERT_TO_SHORTPTR(f->u_buffer + mb_uv_offset),
              CONVERT_TO_SHORTPTR(f->v_buffer + mb_uv_offset), f->uv_stride,
              CONVERT_TO_SHORTPTR(predictor + BLK_PELS),
              CONVERT_TO_SHORTPTR(predictor + (BLK_PELS << 1)), mb_uv_width, BW,
              BH, mbd->plane[1].subsampling_x, mbd->plane[1].subsampling_y,
              adj_strength, blk_fw, use_32x32, accumulator, count,
              accumulator + BLK_PELS, count + BLK_PELS,
              accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
        } else {
          // Apply the filter (YUV)
          vp9_apply_temporal_filter(
              f->y_buffer + mb_y_offset, f->y_stride, predictor, BW,
              f->u_buffer + mb_uv_offset, f->v_buffer + mb_uv_offset,
              f->uv_stride, predictor + BLK_PELS, predictor + (BLK_PELS << 1),
              mb_uv_width, BW, BH, mbd->plane[1].subsampling_x,
              mbd->plane[1].subsampling_y, strength, blk_fw, use_32x32,
              accumulator, count, accumulator + BLK_PELS, count + BLK_PELS,
              accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
        }
#else
        // Apply the filter (YUV)
        vp9_apply_temporal_filter(
            f->y_buffer + mb_y_offset, f->y_stride, predictor, BW,
            f->u_buffer + mb_uv_offset, f->v_buffer + mb_uv_offset,
            f->uv_stride, predictor + BLK_PELS, predictor + (BLK_PELS << 1),
            mb_uv_width, BW, BH, mbd->plane[1].subsampling_x,
            mbd->plane[1].subsampling_y, strength, blk_fw, use_32x32,
            accumulator, count, accumulator + BLK_PELS, count + BLK_PELS,
            accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
#endif  // CONFIG_VP9_HIGHBITDEPTH
      }
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      uint16_t *dst1_16;
      uint16_t *dst2_16;
      // Normalize filter output to produce AltRef frame
      dst1 = cpi->alt_ref_buffer.y_buffer;
      dst1_16 = CONVERT_TO_SHORTPTR(dst1);
      stride = cpi->alt_ref_buffer.y_stride;
      byte = mb_y_offset;
      for (i = 0, k = 0; i < BH; i++) {
        for (j = 0; j < BW; j++, k++) {
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;

          dst1_16[byte] = (uint16_t)pval;

          // move to next pixel
          byte++;
        }

        byte += stride - BW;
      }

      dst1 = cpi->alt_ref_buffer.u_buffer;
      dst2 = cpi->alt_ref_buffer.v_buffer;
      dst1_16 = CONVERT_TO_SHORTPTR(dst1);
      dst2_16 = CONVERT_TO_SHORTPTR(dst2);
      stride = cpi->alt_ref_buffer.uv_stride;
      byte = mb_uv_offset;
      for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
        for (j = 0; j < mb_uv_width; j++, k++) {
          int m = k + BLK_PELS;

          // U
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;
          dst1_16[byte] = (uint16_t)pval;

          // V
          pval = accumulator[m] + (count[m] >> 1);
          pval *= fixed_divide[count[m]];
          pval >>= 19;
          dst2_16[byte] = (uint16_t)pval;

          // move to next pixel
          byte++;
        }

        byte += stride - mb_uv_width;
      }
    } else {
      // Normalize filter output to produce AltRef frame
      dst1 = cpi->alt_ref_buffer.y_buffer;
      stride = cpi->alt_ref_buffer.y_stride;
      byte = mb_y_offset;
      for (i = 0, k = 0; i < BH; i++) {
        for (j = 0; j < BW; j++, k++) {
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;

          dst1[byte] = (uint8_t)pval;

          // move to next pixel
          byte++;
        }
        byte += stride - BW;
      }

      dst1 = cpi->alt_ref_buffer.u_buffer;
      dst2 = cpi->alt_ref_buffer.v_buffer;
      stride = cpi->alt_ref_buffer.uv_stride;
      byte = mb_uv_offset;
      for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
        for (j = 0; j < mb_uv_width; j++, k++) {
          int m = k + BLK_PELS;

          // U
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;
          dst1[byte] = (uint8_t)pval;

          // V
          pval = accumulator[m] + (count[m] >> 1);
          pval *= fixed_divide[count[m]];
          pval >>= 19;
          dst2[byte] = (uint8_t)pval;

          // move to next pixel
          byte++;
        }
        byte += stride - mb_uv_width;
      }
    }
#else
    // Normalize filter output to produce AltRef frame
    dst1 = cpi->alt_ref_buffer.y_buffer;
    stride = cpi->alt_ref_buffer.y_stride;
    byte = mb_y_offset;
    for (i = 0, k = 0; i < BH; i++) {
      for (j = 0; j < BW; j++, k++) {
        unsigned int pval = accumulator[k] + (count[k] >> 1);
        pval *= fixed_divide[count[k]];
        pval >>= 19;

        dst1[byte] = (uint8_t)pval;

        // move to next pixel
        byte++;
      }
      byte += stride - BW;
    }

    dst1 = cpi->alt_ref_buffer.u_buffer;
    dst2 = cpi->alt_ref_buffer.v_buffer;
    stride = cpi->alt_ref_buffer.uv_stride;
    byte = mb_uv_offset;
    for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
      for (j = 0; j < mb_uv_width; j++, k++) {
        int m = k + BLK_PELS;

        // U
        unsigned int pval = accumulator[k] + (count[k] >> 1);
        pval *= fixed_divide[count[k]];
        pval >>= 19;
        dst1[byte] = (uint8_t)pval;

        // V
        pval = accumulator[m] + (count[m] >> 1);
        pval *= fixed_divide[count[m]];
        pval >>= 19;
        dst2[byte] = (uint8_t)pval;

        // move to next pixel
        byte++;
      }
      byte += stride - mb_uv_width;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    mb_y_offset += BW;
    mb_uv_offset += mb_uv_width;
  }
}

static void temporal_filter_iterate_tile_c(VP9_COMP *cpi, int tile_row,
                                           int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileInfo *tile_info =
      &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
  const int mb_row_start = (tile_info->mi_row_start) >> TF_SHIFT;
  const int mb_row_end = (tile_info->mi_row_end + TF_ROUND) >> TF_SHIFT;
  const int mb_col_start = (tile_info->mi_col_start) >> TF_SHIFT;
  const int mb_col_end = (tile_info->mi_col_end + TF_ROUND) >> TF_SHIFT;
  int mb_row;

  for (mb_row = mb_row_start; mb_row < mb_row_end; mb_row++) {
    vp9_temporal_filter_iterate_row_c(cpi, &cpi->td, mb_row, mb_col_start,
                                      mb_col_end);
  }
}

static void temporal_filter_iterate_c(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_row, tile_col;
  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      temporal_filter_iterate_tile_c(cpi, tile_row, tile_col);
    }
  }
}

// Apply buffer limits and context specific adjustments to arnr filter.
static void adjust_arnr_filter(VP9_COMP *cpi, int distance, int group_boost,
                               int *arnr_frames, int *arnr_strength) {
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;
  const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
  const int frames_after_arf =
      vp9_lookahead_depth(cpi->lookahead) - distance - 1;
  int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
  int frames_bwd;
  int q, frames, base_strength, strength;

  // Context dependent two pass adjustment to strength.
  if (oxcf->pass == 2) {
    base_strength = oxcf->arnr_strength + cpi->twopass.arnr_strength_adjustment;
    // Clip to allowed range.
    base_strength = VPXMIN(6, VPXMAX(0, base_strength));
  } else {
    base_strength = oxcf->arnr_strength;
  }

  // Define the forward and backwards filter limits for this arnr group.
  if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
  if (frames_fwd > distance) frames_fwd = distance;

  frames_bwd = frames_fwd;

  // For even length filter there is one more frame backward
  // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
  if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;

  // Set the baseline active filter size.
  frames = frames_bwd + 1 + frames_fwd;

  // Adjust the strength based on active max q.
  if (cpi->common.current_video_frame > 1)
    q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
                                      cpi->common.bit_depth));
  else
    q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
                                      cpi->common.bit_depth));
  if (q > 16) {
    strength = base_strength;
  } else {
    strength = base_strength - ((16 - q) / 2);
    if (strength < 0) strength = 0;
  }

  // Adjust number of frames in filter and strength based on gf boost level.
  if (frames > group_boost / 150) {
    frames = group_boost / 150;
    frames += !(frames & 1);
  }

  if (strength > group_boost / 300) {
    strength = group_boost / 300;
  }

  // Adjustments for second level arf in multi arf case.
  // Leave commented out place holder for possible filtering adjustment with
  // new multi-layer arf code.
  // if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed)
  //   if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) strength >>= 1;

  // TODO(jingning): Skip temporal filtering for intermediate frames that will
  // be used as show_existing_frame. Need to further explore the possibility to
  // apply certain filter.
  if (gf_group->arf_src_offset[gf_group->index] <
      cpi->rc.baseline_gf_interval - 1)
    frames = 1;

  *arnr_frames = frames;
  *arnr_strength = strength;
}

void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
  VP9_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  ARNRFilterData *arnr_filter_data = &cpi->arnr_filter_data;
  int frame;
  int frames_to_blur;
  int start_frame;
  int strength;
  int frames_to_blur_backward;
  int frames_to_blur_forward;
  struct scale_factors *sf = &arnr_filter_data->sf;
  YV12_BUFFER_CONFIG **frames = arnr_filter_data->frames;
  int rdmult;

  // Apply context specific adjustments to the arnr filter parameters.
  adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
  frames_to_blur_backward = (frames_to_blur / 2);
  frames_to_blur_forward = ((frames_to_blur - 1) / 2);
  start_frame = distance + frames_to_blur_forward;

  arnr_filter_data->strength = strength;
  arnr_filter_data->frame_count = frames_to_blur;
  arnr_filter_data->alt_ref_index = frames_to_blur_backward;

  // Setup frame pointers, NULL indicates frame not included in filter.
  for (frame = 0; frame < frames_to_blur; ++frame) {
    const int which_buffer = start_frame - frame;
    struct lookahead_entry *buf =
        vp9_lookahead_peek(cpi->lookahead, which_buffer);
    frames[frames_to_blur - 1 - frame] = &buf->img;
  }
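  // frames[] is now ordered oldest to newest, with frames[alt_ref_index]
  // (i.e. frames[frames_to_blur_backward]) being the frame at `distance`
  // that is denoised into the ARF.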

  if (frames_to_blur > 0) {
    // Setup scaling factors. Scaling on each of the arnr frames is not
    // supported.
    if (cpi->use_svc) {
      // In spatial svc the scaling factors might be less than 1/2,
      // so we will use non-normative scaling.
      int frame_used = 0;
#if CONFIG_VP9_HIGHBITDEPTH
      vp9_setup_scale_factors_for_frame(
          sf, get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height,
          get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height, cm->use_highbitdepth);
#else
      vp9_setup_scale_factors_for_frame(
          sf, get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height,
          get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      for (frame = 0; frame < frames_to_blur; ++frame) {
        if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
            cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
          if (vpx_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
                                       cm->width, cm->height, cm->subsampling_x,
                                       cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
                                       cm->use_highbitdepth,
#endif
                                       VP9_ENC_BORDER_IN_PIXELS,
                                       cm->byte_alignment, NULL, NULL, NULL)) {
            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                               "Failed to reallocate alt_ref_buffer");
          }
          frames[frame] = vp9_scale_if_required(
              cm, frames[frame], &cpi->svc.scaled_frames[frame_used], 0,
              EIGHTTAP, 0);
          ++frame_used;
        }
      }
      cm->mi = cm->mip + cm->mi_stride + 1;
      xd->mi = cm->mi_grid_visible;
      xd->mi[0] = cm->mi;
    } else {
      // ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
      vp9_setup_scale_factors_for_frame(
          sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
          frames[0]->y_crop_width, frames[0]->y_crop_height,
          cm->use_highbitdepth);
#else
      vp9_setup_scale_factors_for_frame(
          sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
          frames[0]->y_crop_width, frames[0]->y_crop_height);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }

  // Initialize errorperbit and sadperbit.
  rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, ARNR_FILT_QINDEX);
  set_error_per_bit(&cpi->td.mb, rdmult);
  vp9_initialize_me_consts(cpi, &cpi->td.mb, ARNR_FILT_QINDEX);

  if (!cpi->row_mt)
    temporal_filter_iterate_c(cpi);
  else
    vp9_temporal_filter_row_mt(cpi);
}