1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
11 */
12
13 #include <assert.h>
14 #include <limits.h>
15 #include <math.h>
16 #include <stdio.h>
17
18 #include "config/aom_dsp_rtcd.h"
19 #include "config/av1_rtcd.h"
20
21 #include "aom_dsp/aom_dsp_common.h"
22 #include "aom_dsp/blend.h"
23 #include "aom_mem/aom_mem.h"
24 #include "aom_ports/aom_timer.h"
25 #include "aom_ports/mem.h"
26 #include "aom_ports/system_state.h"
27
28 #include "av1/encoder/model_rd.h"
29 #include "av1/common/mvref_common.h"
30 #include "av1/common/pred_common.h"
31 #include "av1/common/reconinter.h"
32 #include "av1/common/reconintra.h"
33
34 #include "av1/encoder/encodemv.h"
35 #include "av1/encoder/rdopt.h"
36 #include "av1/encoder/reconinter_enc.h"
37
38 extern int g_pick_inter_mode_cnt;
39 /*!\cond */
40 typedef struct {
41 uint8_t *data;
42 int stride;
43 int in_use;
44 } PRED_BUFFER;
45
46 typedef struct {
47 PRED_BUFFER *best_pred;
48 PREDICTION_MODE best_mode;
49 TX_SIZE best_tx_size;
50 MV_REFERENCE_FRAME best_ref_frame;
51 uint8_t best_mode_skip_txfm;
52 uint8_t best_mode_initial_skip_flag;
53 int_interpfilters best_pred_filter;
54 } BEST_PICKMODE;
55
56 typedef struct {
57 MV_REFERENCE_FRAME ref_frame;
58 PREDICTION_MODE pred_mode;
59 } REF_MODE;
60 /*!\endcond */
61
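// Maps the (row, col) position of a 16x16 block within a 64x64 superblock to
// its index in the variance_low[] array; used by
// get_force_skip_low_temp_var_small_sb() below.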
62 static const int pos_shift_16x16[4][4] = {
63 { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
64 };
65
66 #define NUM_INTER_MODES_RT 9
67 #define NUM_INTER_MODES_REDUCED 8
68
69 static const REF_MODE ref_mode_set_rt[NUM_INTER_MODES_RT] = {
70 { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV },
71 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
72 { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
73 { ALTREF_FRAME, NEARESTMV }, { ALTREF_FRAME, NEARMV },
74 { ALTREF_FRAME, NEWMV }
75 };
76
77 // GLOBALMV in the set below is in fact ZEROMV as we don't do global ME in RT
78 // mode
79 static const REF_MODE ref_mode_set_reduced[NUM_INTER_MODES_REDUCED] = {
80 { LAST_FRAME, GLOBALMV }, { LAST_FRAME, NEARESTMV },
81 { GOLDEN_FRAME, GLOBALMV }, { LAST_FRAME, NEARMV },
82 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
83 { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV }
84 };
85
86 static const THR_MODES mode_idx[REF_FRAMES][4] = {
87 { THR_DC, THR_V_PRED, THR_H_PRED, THR_SMOOTH },
88 { THR_NEARESTMV, THR_NEARMV, THR_GLOBALMV, THR_NEWMV },
89 { THR_NEARESTL2, THR_NEARL2, THR_GLOBALL2, THR_NEWL2 },
90 { THR_NEARESTL3, THR_NEARL3, THR_GLOBALL3, THR_NEWL3 },
91 { THR_NEARESTG, THR_NEARG, THR_GLOBALMV, THR_NEWG },
92 };
93
94 static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
95 SMOOTH_PRED };
96
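// Maps a prediction mode to its column in the mode_idx[][] table above: intra
// modes follow the DC/V/H/SMOOTH order, inter modes use INTER_OFFSET().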
97 static INLINE int mode_offset(const PREDICTION_MODE mode) {
98 if (mode >= NEARESTMV) {
99 return INTER_OFFSET(mode);
100 } else {
101 switch (mode) {
102 case DC_PRED: return 0;
103 case V_PRED: return 1;
104 case H_PRED: return 2;
105 case SMOOTH_PRED: return 3;
106 default: assert(0); return -1;
107 }
108 }
109 }
110
111 enum {
112 // INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV),
113 INTER_NEAREST = (1 << NEARESTMV),
114 INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV),
115 INTER_NEAREST_NEAR = (1 << NEARESTMV) | (1 << NEARMV),
116 INTER_NEAR_NEW = (1 << NEARMV) | (1 << NEWMV),
117 };
118
119 static INLINE void init_best_pickmode(BEST_PICKMODE *bp) {
120 bp->best_mode = NEARESTMV;
121 bp->best_ref_frame = LAST_FRAME;
122 bp->best_tx_size = TX_8X8;
123 bp->best_pred_filter = av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
124 bp->best_mode_skip_txfm = 0;
125 bp->best_mode_initial_skip_flag = 0;
126 bp->best_pred = NULL;
127 }
128
129 /*!\brief Runs Motion Estimation for a specific block and specific ref frame.
130 *
131 * \ingroup nonrd_mode_search
132 * \callgraph
133 * \callergraph
134 * Finds the best Motion Vector by running Motion Estimation for a specific
135  * block and a specific reference frame. Exits early if the RD Cost of the
136  * Full Pel part exceeds the best RD Cost found so far
137 * \param[in] cpi Top-level encoder structure
138 * \param[in] x Pointer to structure holding all the
139 * data for the current macroblock
140 * \param[in] bsize Current block size
141 * \param[in] mi_row Row index in 4x4 units
142 * \param[in] mi_col Column index in 4x4 units
143 * \param[in] tmp_mv Pointer to best found New MV
144 * \param[in] rate_mv Pointer to Rate of the best new MV
145 * \param[in] best_rd_sofar RD Cost of the best mode found so far
146 * \param[in] use_base_mv Flag, indicating that tmp_mv holds
147 * specific MV to start the search with
148 *
149  * \return Returns 0 if ME was terminated after the Full Pel Search because of
150  * a too-high RD Cost. Otherwise returns 1. The best New MV is placed into
151  * \c tmp_mv, and the rate estimate for this vector is placed in \c rate_mv
152 */
153 static int combined_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
154 BLOCK_SIZE bsize, int mi_row, int mi_col,
155 int_mv *tmp_mv, int *rate_mv,
156 int64_t best_rd_sofar, int use_base_mv) {
157 MACROBLOCKD *xd = &x->e_mbd;
158 const AV1_COMMON *cm = &cpi->common;
159 const int num_planes = av1_num_planes(cm);
160 MB_MODE_INFO *mi = xd->mi[0];
161 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
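  // Prefer the step size from the real-time speed feature when it is set;
  // otherwise fall back to the frame-level step size in mv_search_params.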
162 int step_param = (cpi->sf.rt_sf.fullpel_search_step_param)
163 ? cpi->sf.rt_sf.fullpel_search_step_param
164 : cpi->mv_search_params.mv_step_param;
165 FULLPEL_MV start_mv;
166 const int ref = mi->ref_frame[0];
167 const MV ref_mv = av1_get_ref_mv(x, mi->ref_mv_idx).as_mv;
168 MV center_mv;
169 int dis;
170 int rv = 0;
171 int cost_list[5];
172 int search_subpel = 1;
173 const YV12_BUFFER_CONFIG *scaled_ref_frame =
174 av1_get_scaled_ref_frame(cpi, ref);
175
176 if (scaled_ref_frame) {
177 int i;
178 // Swap out the reference frame for a version that's been scaled to
179 // match the resolution of the current frame, allowing the existing
180 // motion search code to be used without additional modifications.
181 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
182 av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL,
183 num_planes);
184 }
185
186 start_mv = get_fullmv_from_mv(&ref_mv);
187
188 if (!use_base_mv)
189 center_mv = ref_mv;
190 else
191 center_mv = tmp_mv->as_mv;
192 const search_site_config *src_search_sites =
193 cpi->mv_search_params.search_site_cfg[SS_CFG_SRC];
194 FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
195   av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize, &center_mv,
196 src_search_sites,
197 /*fine_search_interval=*/0);
198
199 av1_full_pixel_search(start_mv, &full_ms_params, step_param,
200 cond_cost_list(cpi, cost_list), &tmp_mv->as_fullmv,
201 NULL);
202
203 // calculate the bit cost on motion vector
204 MV mvp_full = get_mv_from_fullmv(&tmp_mv->as_fullmv);
205
206 *rate_mv = av1_mv_bit_cost(&mvp_full, &ref_mv, x->mv_costs->nmv_joint_cost,
207 x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
208
209 // TODO(kyslov) Account for Rate Mode!
210 rv = !(RDCOST(x->rdmult, (*rate_mv), 0) > best_rd_sofar);
211
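  // Refine to sub-pel precision only if the full-pel result is still
  // competitive against the best RD cost found so far.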
212 if (rv && search_subpel) {
213 SUBPEL_MOTION_SEARCH_PARAMS ms_params;
214 av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
215 cost_list);
216 MV subpel_start_mv = get_mv_from_fullmv(&tmp_mv->as_fullmv);
217 cpi->mv_search_params.find_fractional_mv_step(
218 xd, cm, &ms_params, subpel_start_mv, &tmp_mv->as_mv, &dis,
219 &x->pred_sse[ref], NULL);
220
221 *rate_mv =
222 av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->mv_costs->nmv_joint_cost,
223 x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
224 }
225
226 if (scaled_ref_frame) {
227 int i;
228 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
229 }
230   // Final MV cannot be equal to the reference MV as this will trigger an
231   // assert later. This can happen if both NEAREST and NEAR modes were skipped.
232 rv = (tmp_mv->as_mv.col != ref_mv.col || tmp_mv->as_mv.row != ref_mv.row);
233 return rv;
234 }
235
236 /*!\brief Searches for the best New Motion Vector.
237 *
238 * \ingroup nonrd_mode_search
239 * \callgraph
240 * \callergraph
241 * Finds the best Motion Vector by doing Motion Estimation. Uses reduced
242 * complexity ME for non-LAST frames or calls \c combined_motion_search
243 * for LAST reference frame
244 * \param[in] cpi Top-level encoder structure
245 * \param[in] x Pointer to structure holding all the
246 * data for the current macroblock
247 * \param[in] frame_mv Array that holds MVs for all modes
248 * and ref frames
249  * \param[in]    ref_frame                Reference frame for which to find
250 * the best New MVs
251 * \param[in] gf_temporal_ref Flag, indicating temporal reference
252 * for GOLDEN frame
253 * \param[in] bsize Current block size
254 * \param[in] mi_row Row index in 4x4 units
255 * \param[in] mi_col Column index in 4x4 units
256 * \param[in] rate_mv Pointer to Rate of the best new MV
257 * \param[in] best_rdc Pointer to the RD Cost for the best
258 * mode found so far
259 *
260 * \return Returns -1 if the search was not done, otherwise returns 0.
261 * Best New MV is placed into \c frame_mv array, Rate estimation for this
262 * vector is placed to \c rate_mv
263 */
264 static int search_new_mv(AV1_COMP *cpi, MACROBLOCK *x,
265 int_mv frame_mv[][REF_FRAMES],
266 MV_REFERENCE_FRAME ref_frame, int gf_temporal_ref,
267 BLOCK_SIZE bsize, int mi_row, int mi_col, int *rate_mv,
268 RD_STATS *best_rdc) {
269 MACROBLOCKD *const xd = &x->e_mbd;
270 MB_MODE_INFO *const mi = xd->mi[0];
271 AV1_COMMON *cm = &cpi->common;
272 if (ref_frame > LAST_FRAME && cpi->oxcf.rc_cfg.mode == AOM_CBR &&
273 gf_temporal_ref) {
274 int tmp_sad;
275 int dis;
276 int cost_list[5] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX };
277
278 if (bsize < BLOCK_16X16) return -1;
279
280 tmp_sad = av1_int_pro_motion_estimation(
281 cpi, x, bsize, mi_row, mi_col,
282 &x->mbmi_ext.ref_mv_stack[ref_frame][0].this_mv.as_mv);
283
284 if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) return -1;
285
286 frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
287 int_mv best_mv = mi->mv[0];
288 best_mv.as_mv.row >>= 3;
289 best_mv.as_mv.col >>= 3;
290 MV ref_mv = av1_get_ref_mv(x, 0).as_mv;
291
292 *rate_mv = av1_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv, &ref_mv,
293 x->mv_costs->nmv_joint_cost,
294 x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
295 frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
296 frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
297
298 SUBPEL_MOTION_SEARCH_PARAMS ms_params;
299 av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
300 cost_list);
301 MV start_mv = get_mv_from_fullmv(&best_mv.as_fullmv);
302 cpi->mv_search_params.find_fractional_mv_step(
303 xd, cm, &ms_params, start_mv, &best_mv.as_mv, &dis,
304 &x->pred_sse[ref_frame], NULL);
305 frame_mv[NEWMV][ref_frame].as_int = best_mv.as_int;
306 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
307 &frame_mv[NEWMV][ref_frame], rate_mv,
308 best_rdc->rdcost, 0)) {
309 return -1;
310 }
311
312 return 0;
313 }
314
315 /*!\brief Finds predicted motion vectors for a block.
316 *
317 * \ingroup nonrd_mode_search
318 * \callgraph
319 * \callergraph
320 * Finds predicted motion vectors for a block from a certain reference frame.
321  * First, it fills the reference MV stack, then picks the best candidate from
322  * the stack and predicts the final MV for the block for each mode.
323 * \param[in] cpi Top-level encoder structure
324 * \param[in] x Pointer to structure holding all the
325 * data for the current macroblock
326  * \param[in]    ref_frame                Reference frame for which to find
327 * ref MVs
328 * \param[in] frame_mv Predicted MVs for a block
329 * \param[in] tile_data Pointer to struct holding adaptive
330 * data/contexts/models for the tile
331 * during encoding
332 * \param[in] yv12_mb Buffer to hold predicted block
333 * \param[in] bsize Current block size
334 * \param[in] force_skip_low_temp_var Flag indicating possible mode search
335  *                                        pruning for low temporal variance blocks
336 *
337 * \return Nothing is returned. Instead, predicted MVs are placed into
338 * \c frame_mv array
339 */
340 static INLINE void find_predictors(AV1_COMP *cpi, MACROBLOCK *x,
341 MV_REFERENCE_FRAME ref_frame,
342 int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES],
343 TileDataEnc *tile_data,
344 struct buf_2d yv12_mb[8][MAX_MB_PLANE],
345 BLOCK_SIZE bsize,
346 int force_skip_low_temp_var) {
347 AV1_COMMON *const cm = &cpi->common;
348 MACROBLOCKD *const xd = &x->e_mbd;
349 MB_MODE_INFO *const mbmi = xd->mi[0];
350 MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
351 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref_frame);
352 const int num_planes = av1_num_planes(cm);
353 (void)tile_data;
354
355 x->pred_mv_sad[ref_frame] = INT_MAX;
356 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
357 // TODO(kyslov) this needs various further optimizations. to be continued..
358 assert(yv12 != NULL);
359 if (yv12 != NULL) {
360 const struct scale_factors *const sf =
361 get_ref_scale_factors_const(cm, ref_frame);
362 av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
363 av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
364 xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
365 mbmi_ext->mode_context);
366 // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
367 // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
368 av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
369 av1_find_best_ref_mvs_from_stack(
370 cm->features.allow_high_precision_mv, mbmi_ext, ref_frame,
371 &frame_mv[NEARESTMV][ref_frame], &frame_mv[NEARMV][ref_frame], 0);
372 frame_mv[GLOBALMV][ref_frame] = mbmi_ext->global_mvs[ref_frame];
373 // Early exit for non-LAST frame if force_skip_low_temp_var is set.
374 if (!av1_is_scaled(sf) && bsize >= BLOCK_8X8 &&
375 !(force_skip_low_temp_var && ref_frame != LAST_FRAME)) {
376 av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
377 bsize);
378 }
379 }
380 av1_count_overlappable_neighbors(cm, xd);
381 mbmi->num_proj_ref = 1;
382 }
383
384 static void estimate_single_ref_frame_costs(const AV1_COMMON *cm,
385 const MACROBLOCKD *xd,
386 const ModeCosts *mode_costs,
387 int segment_id,
388 unsigned int *ref_costs_single) {
389 int seg_ref_active =
390 segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
391 if (seg_ref_active) {
392 memset(ref_costs_single, 0, REF_FRAMES * sizeof(*ref_costs_single));
393 } else {
394 int intra_inter_ctx = av1_get_intra_inter_context(xd);
395 ref_costs_single[INTRA_FRAME] =
396 mode_costs->intra_inter_cost[intra_inter_ctx][0];
397 unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
398 ref_costs_single[LAST_FRAME] = base_cost;
399 ref_costs_single[GOLDEN_FRAME] = base_cost;
400 ref_costs_single[ALTREF_FRAME] = base_cost;
401 // add cost for last, golden, altref
402 ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[0][0][0];
403 ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][0][1];
404 ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][1][0];
405 ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][0][1];
406 ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][2][0];
407 }
408 }
409
410 static void estimate_comp_ref_frame_costs(
411 const AV1_COMMON *cm, const MACROBLOCKD *xd, const ModeCosts *mode_costs,
412 int segment_id, unsigned int (*ref_costs_comp)[REF_FRAMES]) {
413 if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
414 for (int ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
415 memset(ref_costs_comp[ref_frame], 0,
416 REF_FRAMES * sizeof((*ref_costs_comp)[0]));
417 } else {
418 int intra_inter_ctx = av1_get_intra_inter_context(xd);
419 unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
420
421 if (cm->current_frame.reference_mode != SINGLE_REFERENCE) {
422 // Similar to single ref, determine cost of compound ref frames.
423 // cost_compound_refs = cost_first_ref + cost_second_ref
424 const int bwdref_comp_ctx_p = av1_get_pred_context_comp_bwdref_p(xd);
425 const int bwdref_comp_ctx_p1 = av1_get_pred_context_comp_bwdref_p1(xd);
426 const int ref_comp_ctx_p = av1_get_pred_context_comp_ref_p(xd);
427 const int ref_comp_ctx_p1 = av1_get_pred_context_comp_ref_p1(xd);
428 const int ref_comp_ctx_p2 = av1_get_pred_context_comp_ref_p2(xd);
429
430 const int comp_ref_type_ctx = av1_get_comp_reference_type_context(xd);
431 unsigned int ref_bicomp_costs[REF_FRAMES] = { 0 };
432
433 ref_bicomp_costs[LAST_FRAME] = ref_bicomp_costs[LAST2_FRAME] =
434 ref_bicomp_costs[LAST3_FRAME] = ref_bicomp_costs[GOLDEN_FRAME] =
435 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][1];
436 ref_bicomp_costs[BWDREF_FRAME] = ref_bicomp_costs[ALTREF2_FRAME] = 0;
437 ref_bicomp_costs[ALTREF_FRAME] = 0;
438
439 // cost of first ref frame
440 ref_bicomp_costs[LAST_FRAME] +=
441 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
442 ref_bicomp_costs[LAST2_FRAME] +=
443 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
444 ref_bicomp_costs[LAST3_FRAME] +=
445 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
446 ref_bicomp_costs[GOLDEN_FRAME] +=
447 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
448
449 ref_bicomp_costs[LAST_FRAME] +=
450 mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][0];
451 ref_bicomp_costs[LAST2_FRAME] +=
452 mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][1];
453
454 ref_bicomp_costs[LAST3_FRAME] +=
455 mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][0];
456 ref_bicomp_costs[GOLDEN_FRAME] +=
457 mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][1];
458
459 // cost of second ref frame
460 ref_bicomp_costs[BWDREF_FRAME] +=
461 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
462 ref_bicomp_costs[ALTREF2_FRAME] +=
463 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
464 ref_bicomp_costs[ALTREF_FRAME] +=
465 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][1];
466
467 ref_bicomp_costs[BWDREF_FRAME] +=
468 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][0];
469 ref_bicomp_costs[ALTREF2_FRAME] +=
470 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][1];
471
472 // cost: if one ref frame is forward ref, the other ref is backward ref
473 for (int ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
474 for (int ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1) {
475 ref_costs_comp[ref0][ref1] =
476 ref_bicomp_costs[ref0] + ref_bicomp_costs[ref1];
477 }
478 }
479
480 // cost: if both ref frames are the same side.
481 const int uni_comp_ref_ctx_p = av1_get_pred_context_uni_comp_ref_p(xd);
482 const int uni_comp_ref_ctx_p1 = av1_get_pred_context_uni_comp_ref_p1(xd);
483 const int uni_comp_ref_ctx_p2 = av1_get_pred_context_uni_comp_ref_p2(xd);
484 ref_costs_comp[LAST_FRAME][LAST2_FRAME] =
485 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
486 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
487 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][0];
488 ref_costs_comp[LAST_FRAME][LAST3_FRAME] =
489 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
490 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
491 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
492 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][0];
493 ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] =
494 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
495 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
496 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
497 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][1];
498 ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] =
499 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
500 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][1];
501 } else {
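      // Compound references are not selectable in SINGLE_REFERENCE mode, so
      // assign them a flat nominal cost (512 is one bit in AV1's cost units).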
502 for (int ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
503 for (int ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1)
504 ref_costs_comp[ref0][ref1] = 512;
505 }
506 ref_costs_comp[LAST_FRAME][LAST2_FRAME] = 512;
507 ref_costs_comp[LAST_FRAME][LAST3_FRAME] = 512;
508 ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] = 512;
509 ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] = 512;
510 }
511 }
512 }
513
514 static TX_SIZE calculate_tx_size(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
515 MACROBLOCK *const x, unsigned int var,
516 unsigned int sse) {
517 MACROBLOCKD *const xd = &x->e_mbd;
518 TX_SIZE tx_size;
519 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
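  // If the SSE is much larger than the variance, the residual is dominated by
  // its DC component, so prefer a larger transform; otherwise stay at 8x8.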
520 if (txfm_params->tx_mode_search_type == TX_MODE_SELECT) {
521 if (sse > (var << 2))
522 tx_size =
523 AOMMIN(max_txsize_lookup[bsize],
524 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
525 else
526 tx_size = TX_8X8;
527
528 if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ &&
529 cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
530 tx_size = TX_8X8;
531 else if (tx_size > TX_16X16)
532 tx_size = TX_16X16;
533 } else {
534 tx_size =
535 AOMMIN(max_txsize_lookup[bsize],
536 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
537 }
538
539 if (txfm_params->tx_mode_search_type != ONLY_4X4 && bsize > BLOCK_32X32)
540 tx_size = TX_16X16;
541
542 return AOMMIN(tx_size, TX_16X16);
543 }
544
545 static const uint8_t b_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 1, 1, 1, 2,
546 2, 2, 3, 3, 3, 4,
547 4, 4, 5, 5 };
548 static const uint8_t b_height_log2_lookup[BLOCK_SIZES] = { 0, 1, 0, 1, 2, 1,
549 2, 3, 2, 3, 4, 3,
550 4, 5, 4, 5 };
551
552 static void block_variance(const uint8_t *src, int src_stride,
553 const uint8_t *ref, int ref_stride, int w, int h,
554 unsigned int *sse, int *sum, int block_size,
555 uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
556 int i, j, k = 0;
557
558 *sse = 0;
559 *sum = 0;
560
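  // Accumulate SSE and sum over 8x8 sub-blocks; each sub-block variance is
  // sse - sum^2 / 64 (the >> 6), 64 being the pixel count of an 8x8 block.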
561 for (i = 0; i < h; i += block_size) {
562 for (j = 0; j < w; j += block_size) {
563 aom_get8x8var(src + src_stride * i + j, src_stride,
564 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
565 &sum8x8[k]);
566 *sse += sse8x8[k];
567 *sum += sum8x8[k];
568 var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
569 k++;
570 }
571 }
572 }
573
574 static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
575 unsigned int *sse_i, int *sum_i,
576 unsigned int *var_o, unsigned int *sse_o,
577 int *sum_o) {
578 const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
579 const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
580 const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
581 int i, j, k = 0;
582
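  // Merge each 2x2 group of sub-block SSE/sum values into stats for the next
  // larger block size; the shift normalizes sum^2 by the merged block's pixel
  // count.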
583 for (i = 0; i < nh; i += 2) {
584 for (j = 0; j < nw; j += 2) {
585 sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
586 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
587 sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
588 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
589 var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
590 (b_width_log2_lookup[unit_size] +
591 b_height_log2_lookup[unit_size] + 6));
592 k++;
593 }
594 }
595 }
596
597 // Adjust the ac_thr according to speed, width, height and normalized sum
598 static int ac_thr_factor(const int speed, const int width, const int height,
599 const int norm_sum) {
600 if (speed >= 8 && norm_sum < 5) {
601 if (width <= 640 && height <= 480)
602 return 4;
603 else
604 return 2;
605 }
606 return 1;
607 }
608
609 static void model_skip_for_sb_y_large(AV1_COMP *cpi, BLOCK_SIZE bsize,
610 int mi_row, int mi_col, MACROBLOCK *x,
611 MACROBLOCKD *xd, RD_STATS *rd_stats,
612 int *early_term, int calculate_rd) {
613 // Note our transform coeffs are 8 times an orthogonal transform.
614 // Hence quantizer step is also 8 times. To get effective quantizer
615 // we need to divide by 8 before sending to modeling function.
616 unsigned int sse;
617 struct macroblock_plane *const p = &x->plane[0];
618 struct macroblockd_plane *const pd = &xd->plane[0];
619 const uint32_t dc_quant = p->dequant_QTX[0];
620 const uint32_t ac_quant = p->dequant_QTX[1];
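  // dequant_QTX values are 8x the true step size (see the note above), so
  // squaring and shifting right by 6 yields the effective squared quantizer
  // step used as the skip threshold.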
621 const int64_t dc_thr = dc_quant * dc_quant >> 6;
622 int64_t ac_thr = ac_quant * ac_quant >> 6;
623 unsigned int var;
624 int sum;
625
626 const int bw = b_width_log2_lookup[bsize];
627 const int bh = b_height_log2_lookup[bsize];
628 const int num8x8 = 1 << (bw + bh - 2);
629 unsigned int sse8x8[256] = { 0 };
630 int sum8x8[256] = { 0 };
631 unsigned int var8x8[256] = { 0 };
632 TX_SIZE tx_size;
633 int k;
634 // Calculate variance for whole partition, and also save 8x8 blocks' variance
635 // to be used in following transform skipping test.
636 block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
637 4 << bw, 4 << bh, &sse, &sum, 8, sse8x8, sum8x8, var8x8);
638 var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));
639
640 rd_stats->sse = sse;
641
642 #if CONFIG_AV1_TEMPORAL_DENOISING
643 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
644 cpi->oxcf.speed > 5)
645 ac_thr = av1_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level,
646 (abs(sum) >> (bw + bh)),
647 cpi->svc.temporal_layer_id);
648 else
649 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
650 cpi->common.height, abs(sum) >> (bw + bh));
651 #else
652 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
653 cpi->common.height, abs(sum) >> (bw + bh));
654
655 #endif
656 tx_size = calculate_tx_size(cpi, bsize, x, var, sse);
657   // The code below for setting the skip flag assumes a transform size of at
658   // least 8x8, so force this lower limit on the transform.
659 if (tx_size < TX_8X8) tx_size = TX_8X8;
660 xd->mi[0]->tx_size = tx_size;
661
662 // Evaluate if the partition block is a skippable block in Y plane.
663 {
664 unsigned int sse16x16[64] = { 0 };
665 int sum16x16[64] = { 0 };
666 unsigned int var16x16[64] = { 0 };
667 const int num16x16 = num8x8 >> 2;
668
669 unsigned int sse32x32[16] = { 0 };
670 int sum32x32[16] = { 0 };
671 unsigned int var32x32[16] = { 0 };
672 const int num32x32 = num8x8 >> 4;
673
674 int ac_test = 1;
675 int dc_test = 1;
676 const int num = (tx_size == TX_8X8)
677 ? num8x8
678 : ((tx_size == TX_16X16) ? num16x16 : num32x32);
679 const unsigned int *sse_tx =
680 (tx_size == TX_8X8) ? sse8x8
681 : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
682 const unsigned int *var_tx =
683 (tx_size == TX_8X8) ? var8x8
684 : ((tx_size == TX_16X16) ? var16x16 : var32x32);
685
686 // Calculate variance if tx_size > TX_8X8
687 if (tx_size >= TX_16X16)
688 calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
689 sum16x16);
690 if (tx_size == TX_32X32)
691 calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
692 sse32x32, sum32x32);
693
694 // Skipping test
695 *early_term = 0;
696 for (k = 0; k < num; k++)
697 // Check if all ac coefficients can be quantized to zero.
698 if (!(var_tx[k] < ac_thr || var == 0)) {
699 ac_test = 0;
700 break;
701 }
702
703 for (k = 0; k < num; k++)
704 // Check if dc coefficient can be quantized to zero.
705 if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
706 dc_test = 0;
707 break;
708 }
709
710 if (ac_test && dc_test) {
711 int skip_uv[2] = { 0 };
712 unsigned int var_uv[2];
713 unsigned int sse_uv[2];
714 AV1_COMMON *const cm = &cpi->common;
715 // Transform skipping test in UV planes.
716 for (int i = 1; i <= 2; i++) {
717 int j = i - 1;
718 skip_uv[j] = 1;
719 if (x->color_sensitivity[j]) {
720 skip_uv[j] = 0;
721 struct macroblock_plane *const puv = &x->plane[i];
722 struct macroblockd_plane *const puvd = &xd->plane[i];
723 const BLOCK_SIZE uv_bsize = get_plane_block_size(
724 bsize, puvd->subsampling_x, puvd->subsampling_y);
725 // Adjust these thresholds for UV.
726 const int64_t uv_dc_thr =
727 (puv->dequant_QTX[0] * puv->dequant_QTX[0]) >> 3;
728 const int64_t uv_ac_thr =
729 (puv->dequant_QTX[1] * puv->dequant_QTX[1]) >> 3;
730 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, i,
731 i);
732 var_uv[j] = cpi->fn_ptr[uv_bsize].vf(puv->src.buf, puv->src.stride,
733 puvd->dst.buf, puvd->dst.stride,
734 &sse_uv[j]);
735 if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
736 (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
737 skip_uv[j] = 1;
738 else
739 break;
740 }
741 }
742 if (skip_uv[0] & skip_uv[1]) {
743 *early_term = 1;
744 }
745 }
746 }
747 if (calculate_rd) {
748 if (!*early_term) {
749 const int bwide = block_size_wide[bsize];
750 const int bhigh = block_size_high[bsize];
751
752 model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, sse, bwide * bhigh,
753 &rd_stats->rate, &rd_stats->dist);
754 }
755
756 if (*early_term) {
757 rd_stats->rate = 0;
758 rd_stats->dist = sse << 4;
759 }
760 }
761 }
762
763 static void model_rd_for_sb_y(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
764 MACROBLOCK *x, MACROBLOCKD *xd,
765 RD_STATS *rd_stats, int calculate_rd) {
766 // Note our transform coeffs are 8 times an orthogonal transform.
767 // Hence quantizer step is also 8 times. To get effective quantizer
768 // we need to divide by 8 before sending to modeling function.
769 const int ref = xd->mi[0]->ref_frame[0];
770
771 assert(bsize < BLOCK_SIZES_ALL);
772
773 struct macroblock_plane *const p = &x->plane[0];
774 struct macroblockd_plane *const pd = &xd->plane[0];
775 unsigned int sse;
776 int rate;
777 int64_t dist;
778
779 unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
780 pd->dst.buf, pd->dst.stride, &sse);
781 xd->mi[0]->tx_size = calculate_tx_size(cpi, bsize, x, var, sse);
782
783 if (calculate_rd) {
784 const int bwide = block_size_wide[bsize];
785 const int bhigh = block_size_high[bsize];
786 model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, sse, bwide * bhigh, &rate,
787 &dist);
788 } else {
789 rate = INT_MAX; // this will be overwritten later with block_yrd
790 dist = INT_MAX;
791 }
792 rd_stats->sse = sse;
793 x->pred_sse[ref] = (unsigned int)AOMMIN(sse, UINT_MAX);
794
795 assert(rate >= 0);
796
797 rd_stats->skip_txfm = (rate == 0);
798 rate = AOMMIN(rate, INT_MAX);
799 rd_stats->rate = rate;
800 rd_stats->dist = dist;
801 }
802
803 /*!\brief Calculates RD Cost using Hadamard transform.
804 *
805 * \ingroup nonrd_mode_search
806 * \callgraph
807 * \callergraph
808 * Calculates RD Cost using Hadamard transform. For low bit depth this function
809  * uses a low-precision set of functions (16-bit) and 32-bit ones for high bit depth
810 * \param[in] cpi Top-level encoder structure
811 * \param[in] x Pointer to structure holding all the data for
812 the current macroblock
813 * \param[in] mi_row Row index in 4x4 units
814 * \param[in] mi_col Column index in 4x4 units
815 * \param[in] this_rdc Pointer to calculated RD Cost
816 * \param[in] skippable Pointer to a flag indicating possible tx skip
817 * \param[in] bsize Current block size
818 * \param[in] tx_size Transform size
819 *
820 * \return Nothing is returned. Instead, calculated RD cost is placed to
821  * \c this_rdc. The \c skippable flag is set if there are no non-zero quantized
822 * coefficients for Hadamard transform
823 */
824 static void block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col,
825 RD_STATS *this_rdc, int *skippable, BLOCK_SIZE bsize,
826 TX_SIZE tx_size) {
827 MACROBLOCKD *xd = &x->e_mbd;
828 const struct macroblockd_plane *pd = &xd->plane[0];
829 struct macroblock_plane *const p = &x->plane[0];
830 const int num_4x4_w = mi_size_wide[bsize];
831 const int num_4x4_h = mi_size_high[bsize];
832 const int step = 1 << (tx_size << 1);
833 const int block_step = (1 << tx_size);
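  // step: number of 4x4 units covered by one transform block;
  // block_step: transform width/height in 4x4 units.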
834 int block = 0;
835 const int max_blocks_wide =
836 num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
837 const int max_blocks_high =
838 num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
839 int eob_cost = 0;
840 const int bw = 4 * num_4x4_w;
841 const int bh = 4 * num_4x4_h;
842
843 (void)mi_row;
844 (void)mi_col;
845 (void)cpi;
846
847 #if CONFIG_AV1_HIGHBITDEPTH
848 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
849 aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
850 p->src.stride, pd->dst.buf, pd->dst.stride,
851 x->e_mbd.bd);
852 } else {
853 aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
854 pd->dst.buf, pd->dst.stride);
855 }
856 #else
857 aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
858 pd->dst.buf, pd->dst.stride);
859 #endif
860
861 *skippable = 1;
862 // Keep track of the row and column of the blocks we use so that we know
863 // if we are in the unrestricted motion border.
864 for (int r = 0; r < max_blocks_high; r += block_step) {
865 for (int c = 0; c < num_4x4_w; c += block_step) {
866 if (c < max_blocks_wide) {
867 const SCAN_ORDER *const scan_order = &av1_scan_orders[tx_size][DCT_DCT];
868 const int block_offset = BLOCK_OFFSET(block);
869 #if CONFIG_AV1_HIGHBITDEPTH
870 tran_low_t *const coeff = p->coeff + block_offset;
871 tran_low_t *const qcoeff = p->qcoeff + block_offset;
872 tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
873 #else
874 int16_t *const low_coeff = (int16_t *)p->coeff + block_offset;
875 int16_t *const low_qcoeff = (int16_t *)p->qcoeff + block_offset;
876 int16_t *const low_dqcoeff = (int16_t *)p->dqcoeff + block_offset;
877 #endif
878 uint16_t *const eob = &p->eobs[block];
879 const int diff_stride = bw;
880 const int16_t *src_diff;
881 src_diff = &p->src_diff[(r * diff_stride + c) << 2];
882
883 switch (tx_size) {
884 case TX_64X64:
885 assert(0); // Not implemented
886 break;
887 case TX_32X32:
888 assert(0); // Not used
889 break;
890 #if CONFIG_AV1_HIGHBITDEPTH
891 case TX_16X16:
892 aom_hadamard_16x16(src_diff, diff_stride, coeff);
893 av1_quantize_fp(coeff, 16 * 16, p->zbin_QTX, p->round_fp_QTX,
894 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
895 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
896 scan_order->iscan);
897 break;
898 case TX_8X8:
899 aom_hadamard_8x8(src_diff, diff_stride, coeff);
900 av1_quantize_fp(coeff, 8 * 8, p->zbin_QTX, p->round_fp_QTX,
901 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
902 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
903 scan_order->iscan);
904 break;
905 default:
906 assert(tx_size == TX_4X4);
907 aom_fdct4x4(src_diff, coeff, diff_stride);
908 av1_quantize_fp(coeff, 4 * 4, p->zbin_QTX, p->round_fp_QTX,
909 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
910 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
911 scan_order->iscan);
912 break;
913 #else
914 case TX_16X16:
915 aom_hadamard_lp_16x16(src_diff, diff_stride, low_coeff);
916 av1_quantize_lp(low_coeff, 16 * 16, p->round_fp_QTX,
917 p->quant_fp_QTX, low_qcoeff, low_dqcoeff,
918 p->dequant_QTX, eob, scan_order->scan);
919 break;
920 case TX_8X8:
921 aom_hadamard_lp_8x8(src_diff, diff_stride, low_coeff);
922 av1_quantize_lp(low_coeff, 8 * 8, p->round_fp_QTX, p->quant_fp_QTX,
923 low_qcoeff, low_dqcoeff, p->dequant_QTX, eob,
924 scan_order->scan);
925 break;
926 default:
927 assert(tx_size == TX_4X4);
928 aom_fdct4x4_lp(src_diff, low_coeff, diff_stride);
929 av1_quantize_lp(low_coeff, 4 * 4, p->round_fp_QTX, p->quant_fp_QTX,
930 low_qcoeff, low_dqcoeff, p->dequant_QTX, eob,
931 scan_order->scan);
932 break;
933 #endif
934 }
935 assert(*eob <= 1024);
936 *skippable &= (*eob == 0);
937 eob_cost += 1;
938 }
939 block += step;
940 }
941 }
942 this_rdc->skip_txfm = *skippable;
943 this_rdc->rate = 0;
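  // The coefficients are 8x an orthonormal transform, so the pixel-domain SSE
  // is scaled by 64 (<< 6) and then >> 2 to stay on the same scale as the
  // block-error based distortion computed below.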
944 if (this_rdc->sse < INT64_MAX) {
945 this_rdc->sse = (this_rdc->sse << 6) >> 2;
946 if (*skippable) {
947 this_rdc->dist = this_rdc->sse;
948 return;
949 }
950 }
951
952 block = 0;
953 this_rdc->dist = 0;
954 for (int r = 0; r < max_blocks_high; r += block_step) {
955 for (int c = 0; c < num_4x4_w; c += block_step) {
956 if (c < max_blocks_wide) {
957 const int block_offset = BLOCK_OFFSET(block);
958 uint16_t *const eob = &p->eobs[block];
959 #if CONFIG_AV1_HIGHBITDEPTH
960 int64_t dummy;
961 tran_low_t *const coeff = p->coeff + block_offset;
962 tran_low_t *const qcoeff = p->qcoeff + block_offset;
963 tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
964
965 if (*eob == 1)
966 this_rdc->rate += (int)abs(qcoeff[0]);
967 else if (*eob > 1)
968 this_rdc->rate += aom_satd(qcoeff, step << 4);
969
970 this_rdc->dist +=
971 av1_block_error(coeff, dqcoeff, step << 4, &dummy) >> 2;
972 #else
973 int16_t *const low_coeff = (int16_t *)p->coeff + block_offset;
974 int16_t *const low_qcoeff = (int16_t *)p->qcoeff + block_offset;
975 int16_t *const low_dqcoeff = (int16_t *)p->dqcoeff + block_offset;
976
977 if (*eob == 1)
978 this_rdc->rate += (int)abs(low_qcoeff[0]);
979 else if (*eob > 1)
980 this_rdc->rate += aom_satd_lp(low_qcoeff, step << 4);
981
982 this_rdc->dist +=
983 av1_block_error_lp(low_coeff, low_dqcoeff, step << 4) >> 2;
984 #endif
985 }
986 block += step;
987 }
988 }
989
990 // If skippable is set, rate gets clobbered later.
991 this_rdc->rate <<= (2 + AV1_PROB_COST_SHIFT);
992 this_rdc->rate += (eob_cost << AV1_PROB_COST_SHIFT);
993 }
994
995 static INLINE void init_mbmi(MB_MODE_INFO *mbmi, PREDICTION_MODE pred_mode,
996 MV_REFERENCE_FRAME ref_frame0,
997 MV_REFERENCE_FRAME ref_frame1,
998 const AV1_COMMON *cm) {
999 PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
1000 mbmi->ref_mv_idx = 0;
1001 mbmi->mode = pred_mode;
1002 mbmi->uv_mode = UV_DC_PRED;
1003 mbmi->ref_frame[0] = ref_frame0;
1004 mbmi->ref_frame[1] = ref_frame1;
1005 pmi->palette_size[0] = 0;
1006 pmi->palette_size[1] = 0;
1007 mbmi->filter_intra_mode_info.use_filter_intra = 0;
1008 mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
1009 mbmi->motion_mode = SIMPLE_TRANSLATION;
1010 mbmi->num_proj_ref = 1;
1011 mbmi->interintra_mode = 0;
1012 set_default_interp_filters(mbmi, cm->features.interp_filter);
1013 }
1014
1015 #if CONFIG_INTERNAL_STATS
1016 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
1017 int mode_index) {
1018 #else
1019 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
1020 #endif // CONFIG_INTERNAL_STATS
1021 MACROBLOCKD *const xd = &x->e_mbd;
1022 TxfmSearchInfo *txfm_info = &x->txfm_search_info;
1023
1024 // Take a snapshot of the coding context so it can be
1025 // restored if we decide to encode this way
1026 ctx->rd_stats.skip_txfm = txfm_info->skip_txfm;
1027
1028 memset(ctx->blk_skip, 0, sizeof(ctx->blk_skip[0]) * ctx->num_4x4_blk);
1029 memset(ctx->tx_type_map, DCT_DCT,
1030 sizeof(ctx->tx_type_map[0]) * ctx->num_4x4_blk);
1031 ctx->skippable = txfm_info->skip_txfm;
1032 #if CONFIG_INTERNAL_STATS
1033 ctx->best_mode_index = mode_index;
1034 #endif // CONFIG_INTERNAL_STATS
1035 ctx->mic = *xd->mi[0];
1036 ctx->skippable = txfm_info->skip_txfm;
1037 av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, &x->mbmi_ext,
1038 av1_ref_frame_type(xd->mi[0]->ref_frame));
1039 ctx->comp_pred_diff = 0;
1040 ctx->hybrid_pred_diff = 0;
1041 ctx->single_pred_diff = 0;
1042 }
1043
1044 static int get_pred_buffer(PRED_BUFFER *p, int len) {
1045 for (int i = 0; i < len; i++) {
1046 if (!p[i].in_use) {
1047 p[i].in_use = 1;
1048 return i;
1049 }
1050 }
1051 return -1;
1052 }
1053
1054 static void free_pred_buffer(PRED_BUFFER *p) {
1055 if (p != NULL) p->in_use = 0;
1056 }
1057
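// Rate of signalling the inter mode, following the AV1 mode tree:
// NEWMV vs. the rest, then GLOBALMV vs. the rest, then NEARESTMV vs. NEARMV.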
1058 static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode,
1059 int16_t mode_context) {
1060 if (is_inter_compound_mode(mode)) {
1061 return mode_costs
1062 ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
1063 }
1064
1065 int mode_cost = 0;
1066 int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
1067
1068 assert(is_inter_mode(mode));
1069
1070 if (mode == NEWMV) {
1071 mode_cost = mode_costs->newmv_mode_cost[mode_ctx][0];
1072 return mode_cost;
1073 } else {
1074 mode_cost = mode_costs->newmv_mode_cost[mode_ctx][1];
1075 mode_ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
1076
1077 if (mode == GLOBALMV) {
1078 mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][0];
1079 return mode_cost;
1080 } else {
1081 mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][1];
1082 mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
1083 mode_cost += mode_costs->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
1084 return mode_cost;
1085 }
1086 }
1087 }
1088
1089 static void newmv_diff_bias(MACROBLOCKD *xd, PREDICTION_MODE this_mode,
1090 RD_STATS *this_rdc, BLOCK_SIZE bsize, int mv_row,
1091 int mv_col, int speed, uint32_t spatial_variance,
1092 CONTENT_STATE_SB content_state_sb) {
1093 // Bias against MVs associated with NEWMV mode that are very different from
1094 // top/left neighbors.
1095 if (this_mode == NEWMV) {
1096 int al_mv_average_row;
1097 int al_mv_average_col;
1098 int left_row, left_col;
1099 int row_diff, col_diff;
1100 int above_mv_valid = 0;
1101 int left_mv_valid = 0;
1102 int above_row = 0;
1103 int above_col = 0;
1104 if (bsize >= BLOCK_64X64 && content_state_sb.source_sad != kHighSad &&
1105 spatial_variance < 300 &&
1106 (mv_row > 16 || mv_row < -16 || mv_col > 16 || mv_col < -16)) {
1107 this_rdc->rdcost = this_rdc->rdcost << 2;
1108 return;
1109 }
1110 if (xd->above_mbmi) {
1111 above_mv_valid = xd->above_mbmi->mv[0].as_int != INVALID_MV;
1112 above_row = xd->above_mbmi->mv[0].as_mv.row;
1113 above_col = xd->above_mbmi->mv[0].as_mv.col;
1114 }
1115 if (xd->left_mbmi) {
1116 left_mv_valid = xd->left_mbmi->mv[0].as_int != INVALID_MV;
1117 left_row = xd->left_mbmi->mv[0].as_mv.row;
1118 left_col = xd->left_mbmi->mv[0].as_mv.col;
1119 }
1120 if (above_mv_valid && left_mv_valid) {
1121 al_mv_average_row = (above_row + left_row + 1) >> 1;
1122 al_mv_average_col = (above_col + left_col + 1) >> 1;
1123 } else if (above_mv_valid) {
1124 al_mv_average_row = above_row;
1125 al_mv_average_col = above_col;
1126 } else if (left_mv_valid) {
1127 al_mv_average_row = left_row;
1128 al_mv_average_col = left_col;
1129 } else {
1130 al_mv_average_row = al_mv_average_col = 0;
1131 }
1132 row_diff = al_mv_average_row - mv_row;
1133 col_diff = al_mv_average_col - mv_col;
1134 if (row_diff > 80 || row_diff < -80 || col_diff > 80 || col_diff < -80) {
1135 if (bsize >= BLOCK_32X32)
1136 this_rdc->rdcost = this_rdc->rdcost << 1;
1137 else
1138 this_rdc->rdcost = 5 * this_rdc->rdcost >> 2;
1139 }
1140 } else {
1141 // Bias for speed >= 8 for low spatial variance.
1142 if (speed >= 8 && spatial_variance < 150 &&
1143 (mv_row > 64 || mv_row < -64 || mv_col > 64 || mv_col < -64))
1144 this_rdc->rdcost = 5 * this_rdc->rdcost >> 2;
1145 }
1146 }
1147
1148 static void model_rd_for_sb_uv(AV1_COMP *cpi, BLOCK_SIZE plane_bsize,
1149 MACROBLOCK *x, MACROBLOCKD *xd,
1150 RD_STATS *this_rdc, int64_t *sse_y,
1151 int start_plane, int stop_plane) {
1152 // Note our transform coeffs are 8 times an orthogonal transform.
1153 // Hence quantizer step is also 8 times. To get effective quantizer
1154 // we need to divide by 8 before sending to modeling function.
1155 unsigned int sse;
1156 int rate;
1157 int64_t dist;
1158 int i;
1159 int64_t tot_sse = *sse_y;
1160
1161 this_rdc->rate = 0;
1162 this_rdc->dist = 0;
1163 this_rdc->skip_txfm = 0;
1164
1165 for (i = start_plane; i <= stop_plane; ++i) {
1166 struct macroblock_plane *const p = &x->plane[i];
1167 struct macroblockd_plane *const pd = &xd->plane[i];
1168 const uint32_t dc_quant = p->dequant_QTX[0];
1169 const uint32_t ac_quant = p->dequant_QTX[1];
1170 const BLOCK_SIZE bs = plane_bsize;
1171 unsigned int var;
1172 if (!x->color_sensitivity[i - 1]) continue;
1173
1174 var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
1175 pd->dst.stride, &sse);
1176 assert(sse >= var);
1177 tot_sse += sse;
1178
1179 av1_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
1180 dc_quant >> 3, &rate, &dist);
1181
1182 this_rdc->rate += rate >> 1;
1183 this_rdc->dist += dist << 3;
1184
1185 av1_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
1186 &rate, &dist);
1187
1188 this_rdc->rate += rate;
1189 this_rdc->dist += dist << 4;
1190 }
1191
1192 if (this_rdc->rate == 0) {
1193 this_rdc->skip_txfm = 1;
1194 }
1195
1196 if (RDCOST(x->rdmult, this_rdc->rate, this_rdc->dist) >=
1197 RDCOST(x->rdmult, 0, tot_sse << 4)) {
1198 this_rdc->rate = 0;
1199 this_rdc->dist = tot_sse << 4;
1200 this_rdc->skip_txfm = 1;
1201 }
1202
1203 *sse_y = tot_sse;
1204 }
1205
1206 /*!\cond */
1207 struct estimate_block_intra_args {
1208 AV1_COMP *cpi;
1209 MACROBLOCK *x;
1210 PREDICTION_MODE mode;
1211 int skippable;
1212 RD_STATS *rdc;
1213 };
1214 /*!\endcond */
1215
1216 /*!\brief Estimation of RD cost of an intra mode for Non-RD optimized case.
1217 *
1218 * \ingroup nonrd_mode_search
1219 * \callgraph
1220 * \callergraph
1221 * Calculates RD Cost for an intra mode for a single TX block using Hadamard
1222 * transform.
1223 * \param[in] plane Color plane
1224 * \param[in] block Index of a TX block in a prediction block
1225 * \param[in] row Row of a current TX block
1226 * \param[in] col Column of a current TX block
1227 * \param[in] plane_bsize Block size of a current prediction block
1228 * \param[in] tx_size Transform size
1229  * \param[in]    arg          Pointer to a structure that holds parameters
1230 * for intra mode search
1231 *
1232 * \return Nothing is returned. Instead, best mode and RD Cost of the best mode
1233 * are set in \c args->rdc and \c args->mode
1234 */
1235 static void estimate_block_intra(int plane, int block, int row, int col,
1236 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
1237 void *arg) {
1238 struct estimate_block_intra_args *const args = arg;
1239 AV1_COMP *const cpi = args->cpi;
1240 AV1_COMMON *const cm = &cpi->common;
1241 MACROBLOCK *const x = args->x;
1242 MACROBLOCKD *const xd = &x->e_mbd;
1243 struct macroblock_plane *const p = &x->plane[plane];
1244 struct macroblockd_plane *const pd = &xd->plane[plane];
1245 const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
1246 uint8_t *const src_buf_base = p->src.buf;
1247 uint8_t *const dst_buf_base = pd->dst.buf;
1248 const int64_t src_stride = p->src.stride;
1249 const int64_t dst_stride = pd->dst.stride;
1250 RD_STATS this_rdc;
1251
1252 (void)block;
1253
1254 p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
1255 pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
1256
1257 av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size);
1258 av1_invalid_rd_stats(&this_rdc);
1259
1260 if (plane == 0) {
1261 block_yrd(cpi, x, 0, 0, &this_rdc, &args->skippable, bsize_tx,
1262 AOMMIN(tx_size, TX_16X16));
1263 } else {
1264 int64_t sse = 0;
1265 model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &sse, plane, plane);
1266 }
1267
1268 p->src.buf = src_buf_base;
1269 pd->dst.buf = dst_buf_base;
1270 args->rdc->rate += this_rdc.rate;
1271 args->rdc->dist += this_rdc.dist;
1272 }
1273
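// Adaptively update the per-block-size mode pruning thresholds: decay the
// factor for the winning mode and inflate it, up to a speed-feature dependent
// cap, for the modes that lost.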
1274 static INLINE void update_thresh_freq_fact(AV1_COMP *cpi, MACROBLOCK *x,
1275 BLOCK_SIZE bsize,
1276 MV_REFERENCE_FRAME ref_frame,
1277 THR_MODES best_mode_idx,
1278 PREDICTION_MODE mode) {
1279 const THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1280 const BLOCK_SIZE min_size = AOMMAX(bsize - 3, BLOCK_4X4);
1281 const BLOCK_SIZE max_size = AOMMIN(bsize + 6, BLOCK_128X128);
1282 for (BLOCK_SIZE bs = min_size; bs <= max_size; bs += 3) {
1283 int *freq_fact = &x->thresh_freq_fact[bs][thr_mode_idx];
1284 if (thr_mode_idx == best_mode_idx) {
1285 *freq_fact -= (*freq_fact >> 4);
1286 } else {
1287 *freq_fact =
1288 AOMMIN(*freq_fact + RD_THRESH_INC,
1289 cpi->sf.inter_sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1290 }
1291 }
1292 }
1293
1294 #if CONFIG_AV1_TEMPORAL_DENOISING
1295 static void av1_pickmode_ctx_den_update(
1296 AV1_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
1297 unsigned int ref_frame_cost[REF_FRAMES],
1298 int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES], int reuse_inter_pred,
1299 BEST_PICKMODE *bp) {
1300 ctx_den->zero_last_cost_orig = zero_last_cost_orig;
1301 ctx_den->ref_frame_cost = ref_frame_cost;
1302 ctx_den->frame_mv = frame_mv;
1303 ctx_den->reuse_inter_pred = reuse_inter_pred;
1304 ctx_den->best_tx_size = bp->best_tx_size;
1305 ctx_den->best_mode = bp->best_mode;
1306 ctx_den->best_ref_frame = bp->best_ref_frame;
1307 ctx_den->best_pred_filter = bp->best_pred_filter;
1308 ctx_den->best_mode_skip_txfm = bp->best_mode_skip_txfm;
1309 }
1310
1311 static void recheck_zeromv_after_denoising(
1312 AV1_COMP *cpi, MB_MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd,
1313 AV1_DENOISER_DECISION decision, AV1_PICKMODE_CTX_DEN *ctx_den,
1314 struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_STATS *best_rdc,
1315 BEST_PICKMODE *best_pickmode, BLOCK_SIZE bsize, int mi_row, int mi_col) {
1316 // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on
1317 // denoised result. Only do this under noise conditions, and if rdcost of
1318   // ZEROMV on the original source is not significantly higher than rdcost of best
1319 // mode.
1320 if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
1321 ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
1322 ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
1323 (ctx_den->best_ref_frame == GOLDEN_FRAME &&
1324 cpi->svc.number_spatial_layers == 1 &&
1325 decision == FILTER_ZEROMV_BLOCK))) {
1326 // Check if we should pick ZEROMV on denoised signal.
1327 AV1_COMMON *const cm = &cpi->common;
1328 RD_STATS this_rdc;
1329 const ModeCosts *mode_costs = &x->mode_costs;
1330 TxfmSearchInfo *txfm_info = &x->txfm_search_info;
1331 MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
1332
1333 mi->mode = GLOBALMV;
1334 mi->ref_frame[0] = LAST_FRAME;
1335 mi->ref_frame[1] = NONE_FRAME;
1336 set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE_FRAME);
1337 mi->mv[0].as_int = 0;
1338 mi->interp_filters = av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
1339 xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0];
1340 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1341 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc, 1);
1342
1343 const int16_t mode_ctx =
1344 av1_mode_context_analyzer(mbmi_ext->mode_context, mi->ref_frame);
1345 this_rdc.rate += cost_mv_ref(mode_costs, GLOBALMV, mode_ctx);
1346
1347 this_rdc.rate += ctx_den->ref_frame_cost[LAST_FRAME];
1348 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
1349 txfm_info->skip_txfm = this_rdc.skip_txfm;
1350 // Don't switch to ZEROMV if the rdcost for ZEROMV on denoised source
1351 // is higher than best_ref mode (on original source).
1352 if (this_rdc.rdcost > best_rdc->rdcost) {
1353 this_rdc = *best_rdc;
1354 mi->mode = best_pickmode->best_mode;
1355 mi->ref_frame[0] = best_pickmode->best_ref_frame;
1356 set_ref_ptrs(cm, xd, mi->ref_frame[0], NONE_FRAME);
1357 mi->interp_filters = best_pickmode->best_pred_filter;
1358 if (best_pickmode->best_ref_frame == INTRA_FRAME) {
1359 mi->mv[0].as_int = INVALID_MV;
1360 } else {
1361 mi->mv[0].as_int = ctx_den
1362 ->frame_mv[best_pickmode->best_mode]
1363 [best_pickmode->best_ref_frame]
1364 .as_int;
1365 if (ctx_den->reuse_inter_pred) {
1366 xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
1367 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1368 }
1369 }
1370 mi->tx_size = best_pickmode->best_tx_size;
1371 txfm_info->skip_txfm = best_pickmode->best_mode_skip_txfm;
1372 } else {
1373 ctx_den->best_ref_frame = LAST_FRAME;
1374 *best_rdc = this_rdc;
1375 }
1376 }
1377 }
1378 #endif // CONFIG_AV1_TEMPORAL_DENOISING
1379
1380 static INLINE int get_force_skip_low_temp_var_small_sb(uint8_t *variance_low,
1381 int mi_row, int mi_col,
1382 BLOCK_SIZE bsize) {
1383 // Relative indices of MB inside the superblock.
1384 const int mi_x = mi_row & 0xF;
1385 const int mi_y = mi_col & 0xF;
1386 // Relative indices of 16x16 block inside the superblock.
1387 const int i = mi_x >> 2;
1388 const int j = mi_y >> 2;
1389 int force_skip_low_temp_var = 0;
1390 // Set force_skip_low_temp_var based on the block size and block offset.
1391 switch (bsize) {
1392 case BLOCK_64X64: force_skip_low_temp_var = variance_low[0]; break;
1393 case BLOCK_64X32:
1394 if (!mi_y && !mi_x) {
1395 force_skip_low_temp_var = variance_low[1];
1396 } else if (!mi_y && mi_x) {
1397 force_skip_low_temp_var = variance_low[2];
1398 }
1399 break;
1400 case BLOCK_32X64:
1401 if (!mi_y && !mi_x) {
1402 force_skip_low_temp_var = variance_low[3];
1403 } else if (mi_y && !mi_x) {
1404 force_skip_low_temp_var = variance_low[4];
1405 }
1406 break;
1407 case BLOCK_32X32:
1408 if (!mi_y && !mi_x) {
1409 force_skip_low_temp_var = variance_low[5];
1410 } else if (mi_y && !mi_x) {
1411 force_skip_low_temp_var = variance_low[6];
1412 } else if (!mi_y && mi_x) {
1413 force_skip_low_temp_var = variance_low[7];
1414 } else if (mi_y && mi_x) {
1415 force_skip_low_temp_var = variance_low[8];
1416 }
1417 break;
1418 case BLOCK_32X16:
1419 case BLOCK_16X32:
1420 case BLOCK_16X16:
1421 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
1422 break;
1423 default: break;
1424 }
1425
1426 return force_skip_low_temp_var;
1427 }
1428
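// Same as above, but for 128x128 superblocks. The variance_low[] layout is:
// [0] 128x128, [1..2] 128x64, [3..4] 64x128, [5..8] 64x64, [9..16] 64x32,
// [17..24] 32x64, [25..40] 32x32, [41..104] 16x16-level blocks (also used
// for 32x16 and 16x32).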
1429 static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
1430 int mi_col, BLOCK_SIZE bsize) {
1431 int force_skip_low_temp_var = 0;
1432 int x, y;
1433 x = (mi_col & 0x1F) >> 4;
1434 // y = (mi_row & 0x1F) >> 4;
1435 // const int idx64 = (y << 1) + x;
1436 y = (mi_row & 0x17) >> 3;
1437 const int idx64 = y + x;
1438
1439 x = (mi_col & 0xF) >> 3;
1440 // y = (mi_row & 0xF) >> 3;
1441 // const int idx32 = (y << 1) + x;
1442 y = (mi_row & 0xB) >> 2;
1443 const int idx32 = y + x;
1444
1445 x = (mi_col & 0x7) >> 2;
1446 // y = (mi_row & 0x7) >> 2;
1447 // const int idx16 = (y << 1) + x;
1448 y = (mi_row & 0x5) >> 1;
1449 const int idx16 = y + x;
1450 // Set force_skip_low_temp_var based on the block size and block offset.
1451 switch (bsize) {
1452 case BLOCK_128X128: force_skip_low_temp_var = variance_low[0]; break;
1453 case BLOCK_128X64:
1454 assert((mi_col & 0x1F) == 0);
1455 force_skip_low_temp_var = variance_low[1 + ((mi_row & 0x1F) != 0)];
1456 break;
1457 case BLOCK_64X128:
1458 assert((mi_row & 0x1F) == 0);
1459 force_skip_low_temp_var = variance_low[3 + ((mi_col & 0x1F) != 0)];
1460 break;
1461 case BLOCK_64X64:
1462 // Location of this 64x64 block inside the 128x128 superblock
1463 force_skip_low_temp_var = variance_low[5 + idx64];
1464 break;
1465 case BLOCK_64X32:
1466 x = (mi_col & 0x1F) >> 4;
1467 y = (mi_row & 0x1F) >> 3;
1468 /*
1469 .---------------.---------------.
1470       | x=0,y=0,idx=0 | x=1,y=0,idx=2 |
1471 :---------------+---------------:
1472 | x=0,y=1,idx=1 | x=1,y=1,idx=3 |
1473 :---------------+---------------:
1474 | x=0,y=2,idx=4 | x=1,y=2,idx=6 |
1475 :---------------+---------------:
1476 | x=0,y=3,idx=5 | x=1,y=3,idx=7 |
1477 '---------------'---------------'
1478 */
1479 const int idx64x32 = (x << 1) + (y % 2) + ((y >> 1) << 2);
1480 force_skip_low_temp_var = variance_low[9 + idx64x32];
1481 break;
1482 case BLOCK_32X64:
1483 x = (mi_col & 0x1F) >> 3;
1484 y = (mi_row & 0x1F) >> 4;
1485 const int idx32x64 = (y << 2) + x;
1486 force_skip_low_temp_var = variance_low[17 + idx32x64];
1487 break;
1488 case BLOCK_32X32:
1489 force_skip_low_temp_var = variance_low[25 + (idx64 << 2) + idx32];
1490 break;
1491 case BLOCK_32X16:
1492 case BLOCK_16X32:
1493 case BLOCK_16X16:
1494 force_skip_low_temp_var =
1495 variance_low[41 + (idx64 << 4) + (idx32 << 2) + idx16];
1496 break;
1497 default: break;
1498 }
1499 return force_skip_low_temp_var;
1500 }
1501
1502 #define FILTER_SEARCH_SIZE 2
/*!\brief Searches for the best interpolation filter
1504 *
1505 * \ingroup nonrd_mode_search
1506 * \callgraph
1507 * \callergraph
 * Iterates through a subset of possible interpolation filters (currently
 * only EIGHTTAP_REGULAR and EIGHTTAP_SMOOTH in both directions) and selects
 * the one that gives the lowest RD cost. The RD cost is calculated using the
 * curvfit model.
1511 *
1512 * \param[in] cpi Top-level encoder structure
1513 * \param[in] x Pointer to structure holding all the
1514 * data for the current macroblock
1515 * \param[in] this_rdc Pointer to calculated RD Cost
1516 * \param[in] mi_row Row index in 4x4 units
1517 * \param[in] mi_col Column index in 4x4 units
1518 * \param[in] tmp Pointer to a temporary buffer for
1519 * prediction re-use
1520 * \param[in] bsize Current block size
1521 * \param[in] reuse_inter_pred Flag, indicating prediction re-use
1522 * \param[out] this_mode_pred Pointer to store prediction buffer
1523 * for prediction re-use
1524 * \param[out] this_early_term Flag, indicating that transform can be
1525 * skipped
1526 * \param[in] use_model_yrd_large Flag, indicating special logic to handle
1527 * large blocks
1528 *
 * \return Nothing is returned. Instead, the calculated RD cost is placed in
 * \c this_rdc and the best filter is placed in \c mi->interp_filters. If the
 * \c reuse_inter_pred flag is set, this function also outputs
 * \c this_mode_pred. \c this_early_term is also set if the transform can be
 * skipped
1534 */
1535 static void search_filter_ref(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *this_rdc,
1536 int mi_row, int mi_col, PRED_BUFFER *tmp,
1537 BLOCK_SIZE bsize, int reuse_inter_pred,
1538 PRED_BUFFER **this_mode_pred,
1539 int *this_early_term, int use_model_yrd_large) {
1540 AV1_COMMON *const cm = &cpi->common;
1541 MACROBLOCKD *const xd = &x->e_mbd;
1542 struct macroblockd_plane *const pd = &xd->plane[0];
1543 MB_MODE_INFO *const mi = xd->mi[0];
1544 const int bw = block_size_wide[bsize];
1545 RD_STATS pf_rd_stats[FILTER_SEARCH_SIZE] = { 0 };
1546 TX_SIZE pf_tx_size[FILTER_SEARCH_SIZE] = { 0 };
1547 PRED_BUFFER *current_pred = *this_mode_pred;
1548 int best_skip = 0;
1549 int best_early_term = 0;
1550 int64_t best_cost = INT64_MAX;
1551 int best_filter_index = -1;
1552 InterpFilter filters[FILTER_SEARCH_SIZE] = { EIGHTTAP_REGULAR,
1553 EIGHTTAP_SMOOTH };
1554 for (int i = 0; i < FILTER_SEARCH_SIZE; ++i) {
1555 int64_t cost;
1556 InterpFilter filter = filters[i];
1557 mi->interp_filters = av1_broadcast_interp_filter(filter);
1558 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1559 if (use_model_yrd_large)
1560 model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd,
1561 &pf_rd_stats[i], this_early_term, 1);
1562 else
1563 model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rd_stats[i], 1);
1564 pf_rd_stats[i].rate += av1_get_switchable_rate(
1565 x, xd, cm->features.interp_filter, cm->seq_params.enable_dual_filter);
1566 cost = RDCOST(x->rdmult, pf_rd_stats[i].rate, pf_rd_stats[i].dist);
1567 pf_tx_size[i] = mi->tx_size;
1568 if (cost < best_cost) {
1569 best_filter_index = i;
1570 best_cost = cost;
1571 best_skip = pf_rd_stats[i].skip_txfm;
1572 best_early_term = *this_early_term;
1573 if (reuse_inter_pred) {
1574 if (*this_mode_pred != current_pred) {
1575 free_pred_buffer(*this_mode_pred);
1576 *this_mode_pred = current_pred;
1577 }
1578 current_pred = &tmp[get_pred_buffer(tmp, 3)];
1579 pd->dst.buf = current_pred->data;
1580 pd->dst.stride = bw;
1581 }
1582 }
1583 }
1584 assert(best_filter_index >= 0 && best_filter_index < FILTER_SEARCH_SIZE);
1585 if (reuse_inter_pred && *this_mode_pred != current_pred)
1586 free_pred_buffer(current_pred);
1587
1588 mi->interp_filters = av1_broadcast_interp_filter(filters[best_filter_index]);
1589 mi->tx_size = pf_tx_size[best_filter_index];
1590 this_rdc->rate = pf_rd_stats[best_filter_index].rate;
1591 this_rdc->dist = pf_rd_stats[best_filter_index].dist;
1592 this_rdc->sse = pf_rd_stats[best_filter_index].sse;
1593 this_rdc->skip_txfm = (best_skip || best_early_term);
1594 *this_early_term = best_early_term;
1595 if (reuse_inter_pred) {
1596 pd->dst.buf = (*this_mode_pred)->data;
1597 pd->dst.stride = (*this_mode_pred)->stride;
1598 } else if (best_filter_index < FILTER_SEARCH_SIZE - 1) {
1599 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1600 }
1601 }
1602
1603 #define COLLECT_PICK_MODE_STAT 0
1604
1605 #if COLLECT_PICK_MODE_STAT
1606 typedef struct _mode_search_stat {
1607 int32_t num_blocks[BLOCK_SIZES];
1608 int64_t avg_block_times[BLOCK_SIZES];
1609 int32_t num_searches[BLOCK_SIZES][MB_MODE_COUNT];
1610 int32_t num_nonskipped_searches[BLOCK_SIZES][MB_MODE_COUNT];
1611 int64_t search_times[BLOCK_SIZES][MB_MODE_COUNT];
1612 int64_t nonskipped_search_times[BLOCK_SIZES][MB_MODE_COUNT];
1613 struct aom_usec_timer timer1;
1614 struct aom_usec_timer timer2;
1615 } mode_search_stat;
1616 #endif // COLLECT_PICK_MODE_STAT
1617
1618 static void compute_intra_yprediction(const AV1_COMMON *cm,
1619 PREDICTION_MODE mode, BLOCK_SIZE bsize,
1620 MACROBLOCK *x, MACROBLOCKD *xd) {
1621 struct macroblockd_plane *const pd = &xd->plane[0];
1622 struct macroblock_plane *const p = &x->plane[0];
1623 uint8_t *const src_buf_base = p->src.buf;
1624 uint8_t *const dst_buf_base = pd->dst.buf;
1625 const int src_stride = p->src.stride;
1626 const int dst_stride = pd->dst.stride;
1627 int plane = 0;
1628 int row, col;
  // Block and transform sizes, in log2 of the number of 4x4 blocks ("*_b"):
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8.
  // Transform size varies per plane; look it up in a common way.
1632 const TX_SIZE tx_size = max_txsize_lookup[bsize];
1633 const BLOCK_SIZE plane_bsize =
1634 get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);
1635 // If mb_to_right_edge is < 0 we are in a situation in which
1636 // the current block size extends into the UMV and we won't
1637 // visit the sub blocks that are wholly within the UMV.
1638 const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
1639 const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
1640 // Keep track of the row and column of the blocks we use so that we know
1641 // if we are in the unrestricted motion border.
1642 for (row = 0; row < max_blocks_high; row += (1 << tx_size)) {
1643 // Skip visiting the sub blocks that are wholly within the UMV.
1644 for (col = 0; col < max_blocks_wide; col += (1 << tx_size)) {
1645 p->src.buf = &src_buf_base[4 * (row * (int64_t)src_stride + col)];
1646 pd->dst.buf = &dst_buf_base[4 * (row * (int64_t)dst_stride + col)];
1647 av1_predict_intra_block(cm, xd, block_size_wide[bsize],
1648 block_size_high[bsize], tx_size, mode, 0, 0,
1649 FILTER_INTRA_MODES, pd->dst.buf, dst_stride,
1650 pd->dst.buf, dst_stride, 0, 0, plane);
1651 }
1652 }
1653 p->src.buf = src_buf_base;
1654 pd->dst.buf = dst_buf_base;
1655 }
1656
1657 void av1_nonrd_pick_intra_mode(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *rd_cost,
1658 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1659 AV1_COMMON *const cm = &cpi->common;
1660 MACROBLOCKD *const xd = &x->e_mbd;
1661 MB_MODE_INFO *const mi = xd->mi[0];
1662 RD_STATS this_rdc, best_rdc;
1663 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1664 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1665 const TX_SIZE intra_tx_size =
1666 AOMMIN(max_txsize_lookup[bsize],
1667 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
1668 int *bmode_costs;
1669 const MB_MODE_INFO *above_mi = xd->above_mbmi;
1670 const MB_MODE_INFO *left_mi = xd->left_mbmi;
1671 const PREDICTION_MODE A = av1_above_block_mode(above_mi);
1672 const PREDICTION_MODE L = av1_left_block_mode(left_mi);
1673 bmode_costs = x->mode_costs.y_mode_costs[A][L];
1674
1675 av1_invalid_rd_stats(&best_rdc);
1676 av1_invalid_rd_stats(&this_rdc);
1677
1678 init_mbmi(mi, DC_PRED, INTRA_FRAME, NONE_FRAME, cm);
1679 mi->mv[0].as_int = mi->mv[1].as_int = INVALID_MV;
1680
1681 // Change the limit of this loop to add other intra prediction
1682 // mode tests.
1683 for (int i = 0; i < 4; ++i) {
1684 PREDICTION_MODE this_mode = intra_mode_list[i];
1685 this_rdc.dist = this_rdc.rate = 0;
1686 args.mode = this_mode;
1687 args.skippable = 1;
1688 args.rdc = &this_rdc;
1689 mi->tx_size = intra_tx_size;
1690 av1_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
1691 &args);
1692 if (args.skippable) {
1693 this_rdc.rate = av1_cost_symbol(av1_get_skip_txfm_cdf(xd)[1]);
1694 } else {
1695 this_rdc.rate += av1_cost_symbol(av1_get_skip_txfm_cdf(xd)[0]);
1696 }
1697 this_rdc.rate += bmode_costs[this_mode];
1698 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
1699
1700 if (this_rdc.rdcost < best_rdc.rdcost) {
1701 best_rdc = this_rdc;
1702 mi->mode = this_mode;
1703 }
1704 }
1705
1706 *rd_cost = best_rdc;
1707
1708 #if CONFIG_INTERNAL_STATS
1709 store_coding_context(x, ctx, mi->mode);
1710 #else
1711 store_coding_context(x, ctx);
1712 #endif // CONFIG_INTERNAL_STATS
1713 }
1714
1715 static AOM_INLINE int is_same_gf_and_last_scale(AV1_COMMON *cm) {
1716 struct scale_factors *const sf_last = get_ref_scale_factors(cm, LAST_FRAME);
1717 struct scale_factors *const sf_golden =
1718 get_ref_scale_factors(cm, GOLDEN_FRAME);
1719 return ((sf_last->x_scale_fp == sf_golden->x_scale_fp) &&
1720 (sf_last->y_scale_fp == sf_golden->y_scale_fp));
1721 }
1722
1723 static AOM_INLINE void get_ref_frame_use_mask(AV1_COMP *cpi, MACROBLOCK *x,
1724 MB_MODE_INFO *mi, int mi_row,
1725 int mi_col, int bsize,
1726 int gf_temporal_ref,
1727 int use_ref_frame[],
1728 int *force_skip_low_temp_var) {
1729 AV1_COMMON *const cm = &cpi->common;
1730 const struct segmentation *const seg = &cm->seg;
1731 const int is_small_sb = (cm->seq_params.sb_size == BLOCK_64X64);
1732
1733 // For SVC the usage of alt_ref is determined by the ref_frame_flags.
1734 int use_alt_ref_frame = cpi->use_svc || cpi->sf.rt_sf.use_nonrd_altref_frame;
1735 int use_golden_ref_frame = 1;
1736
1737 use_ref_frame[LAST_FRAME] = 1; // we never skip LAST
1738
1739 if (cpi->rc.frames_since_golden == 0 && gf_temporal_ref) {
1740 use_golden_ref_frame = 0;
1741 }
1742
1743 if (cpi->sf.rt_sf.short_circuit_low_temp_var &&
1744 x->nonrd_prune_ref_frame_search) {
1745 if (is_small_sb)
1746 *force_skip_low_temp_var = get_force_skip_low_temp_var_small_sb(
1747 &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
1748 else
1749 *force_skip_low_temp_var = get_force_skip_low_temp_var(
1750 &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
1751 // If force_skip_low_temp_var is set, skip golden reference.
1752 if (*force_skip_low_temp_var) {
1753 use_golden_ref_frame = 0;
1754 use_alt_ref_frame = 0;
1755 }
1756 }
1757
1758 if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
1759 get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) == GOLDEN_FRAME) {
1760 use_golden_ref_frame = 1;
1761 use_alt_ref_frame = 0;
1762 }
1763
1764 use_alt_ref_frame =
1765 cpi->ref_frame_flags & AOM_ALT_FLAG ? use_alt_ref_frame : 0;
1766 use_golden_ref_frame =
1767 cpi->ref_frame_flags & AOM_GOLD_FLAG ? use_golden_ref_frame : 0;
1768
1769 use_ref_frame[ALTREF_FRAME] = use_alt_ref_frame;
1770 use_ref_frame[GOLDEN_FRAME] = use_golden_ref_frame;
1771 }
1772
1773 /*!\brief Estimates best intra mode for inter mode search
1774 *
1775 * \ingroup nonrd_mode_search
1776 * \callgraph
1777 * \callergraph
1778 *
 * Uses heuristics based on the best inter mode, block size, and other
 * parameters to decide whether to check intra modes. If so, estimates and
 * selects the best intra mode from a reduced set of intra modes (at most 4
 * intra modes are checked)
1782 *
1783 * \param[in] cpi Top-level encoder structure
1784 * \param[in] x Pointer to structure holding all the
1785 * data for the current macroblock
1786 * \param[in] bsize Current block size
1787 * \param[in] use_modeled_non_rd_cost Flag, indicating usage of curvfit
1788 * model for RD cost
1789 * \param[in] best_early_term Flag, indicating that TX for the
1790 * best inter mode was skipped
1791 * \param[in] ref_cost_intra Cost of signalling intra mode
1792 * \param[in] reuse_prediction Flag, indicating prediction re-use
1793 * \param[in] orig_dst Original destination buffer
1794 * \param[in] tmp_buffers Pointer to a temporary buffers for
1795 * prediction re-use
1796 * \param[out] this_mode_pred Pointer to store prediction buffer
1797 * for prediction re-use
1798 * \param[in] best_rdc Pointer to RD cost for the best
1799 * selected intra mode
1800 * \param[in] best_pickmode Pointer to a structure containing
1801 * best mode picked so far
1802 *
 * \return Nothing is returned. Instead, the calculated RD cost is placed in
 * \c best_rdc and the best selected mode is placed in \c best_pickmode
1805 */
1806 static void estimate_intra_mode(
1807 AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int use_modeled_non_rd_cost,
1808 int best_early_term, unsigned int ref_cost_intra, int reuse_prediction,
1809 struct buf_2d *orig_dst, PRED_BUFFER *tmp_buffers,
1810 PRED_BUFFER **this_mode_pred, RD_STATS *best_rdc,
1811 BEST_PICKMODE *best_pickmode) {
1812 AV1_COMMON *const cm = &cpi->common;
1813 MACROBLOCKD *const xd = &x->e_mbd;
1814 MB_MODE_INFO *const mi = xd->mi[0];
1815 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1816 const unsigned char segment_id = mi->segment_id;
1817 const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
1818 const int *const rd_thresh_freq_fact = x->thresh_freq_fact[bsize];
1819 const int mi_row = xd->mi_row;
1820 const int mi_col = xd->mi_col;
1821 struct macroblockd_plane *const pd = &xd->plane[0];
1822
1823 const CommonQuantParams *quant_params = &cm->quant_params;
1824
1825 RD_STATS this_rdc;
1826
1827 int intra_cost_penalty = av1_get_intra_cost_penalty(
1828 quant_params->base_qindex, quant_params->y_dc_delta_q,
1829 cm->seq_params.bit_depth);
1830 int64_t inter_mode_thresh = RDCOST(x->rdmult, intra_cost_penalty, 0);
1831 int perform_intra_pred = cpi->sf.rt_sf.check_intra_pred_nonrd;
  // For spatial enhancement layers: turn off intra prediction if the previous
  // spatial layer (used as the golden ref) is not chosen as the best
  // reference. Only do this for temporal enhancement layers and on non-key
  // frames.
1835 if (cpi->svc.spatial_layer_id > 0 &&
1836 best_pickmode->best_ref_frame != GOLDEN_FRAME &&
1837 cpi->svc.temporal_layer_id > 0 &&
1838 !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)
1839 perform_intra_pred = 0;
1840
1841 int do_early_exit_rdthresh = 1;
1842
1843 uint32_t spatial_var_thresh = 50;
1844 int motion_thresh = 32;
  // Adjust thresholds so intra mode is more likely to be tested if the other
  // references (golden, alt) are skipped/not checked. For now, always adjust
  // for SVC mode.
1848 if (cpi->use_svc || (cpi->sf.rt_sf.use_nonrd_altref_frame == 0 &&
1849 cpi->sf.rt_sf.nonrd_prune_ref_frame_search > 0)) {
1850 spatial_var_thresh = 150;
1851 motion_thresh = 0;
1852 }
1853
1854 // Some adjustments to checking intra mode based on source variance.
1855 if (x->source_variance < spatial_var_thresh) {
    // If the best inter mode has large motion or uses a non-LAST reference,
    // reduce the intra cost penalty so intra mode is more likely to be tested.
1858 if (best_pickmode->best_ref_frame != LAST_FRAME ||
1859 abs(mi->mv[0].as_mv.row) >= motion_thresh ||
1860 abs(mi->mv[0].as_mv.col) >= motion_thresh) {
1861 intra_cost_penalty = intra_cost_penalty >> 2;
1862 inter_mode_thresh = RDCOST(x->rdmult, intra_cost_penalty, 0);
1863 do_early_exit_rdthresh = 0;
1864 }
    // For big blocks intra is worth checking (since only DC will be checked),
    // even if best_early_term is set.
1867 if (bsize >= BLOCK_32X32) best_early_term = 0;
1868 } else if (cpi->sf.rt_sf.source_metrics_sb_nonrd &&
1869 x->content_state_sb.source_sad == kLowSad) {
1870 perform_intra_pred = 0;
1871 }
1872
1873 if (cpi->sf.rt_sf.skip_intra_pred_if_tx_skip && best_rdc->skip_txfm &&
1874 best_pickmode->best_mode_initial_skip_flag) {
1875 perform_intra_pred = 0;
1876 }
1877
1878 if (!(best_rdc->rdcost == INT64_MAX ||
1879 (perform_intra_pred && !best_early_term &&
1880 best_rdc->rdcost > inter_mode_thresh &&
1881 bsize <= cpi->sf.part_sf.max_intra_bsize))) {
1882 return;
1883 }
1884
1885 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1886 TX_SIZE intra_tx_size = AOMMIN(
1887 AOMMIN(max_txsize_lookup[bsize],
1888 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]),
1889 TX_16X16);
1890
1891 PRED_BUFFER *const best_pred = best_pickmode->best_pred;
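  // If the best inter prediction currently lives in the real destination
  // buffer, copy it out to a scratch buffer first so that the intra
  // predictions computed below do not overwrite it.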
1892 if (reuse_prediction && best_pred != NULL) {
1893 const int bh = block_size_high[bsize];
1894 const int bw = block_size_wide[bsize];
1895 if (best_pred->data == orig_dst->buf) {
1896 *this_mode_pred = &tmp_buffers[get_pred_buffer(tmp_buffers, 3)];
1897 aom_convolve_copy(best_pred->data, best_pred->stride,
1898 (*this_mode_pred)->data, (*this_mode_pred)->stride, bw,
1899 bh);
1900 best_pickmode->best_pred = *this_mode_pred;
1901 }
1902 }
1903 pd->dst = *orig_dst;
1904
1905 for (int i = 0; i < 4; ++i) {
1906 const PREDICTION_MODE this_mode = intra_mode_list[i];
1907 const THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
1908 const int64_t mode_rd_thresh = rd_threshes[mode_index];
1909
1910 if (!((1 << this_mode) & cpi->sf.rt_sf.intra_y_mode_bsize_mask_nrd[bsize]))
1911 continue;
1912
1913 if (rd_less_than_thresh(best_rdc->rdcost, mode_rd_thresh,
1914 rd_thresh_freq_fact[mode_index]) &&
1915 (do_early_exit_rdthresh || this_mode == SMOOTH_PRED)) {
1916 continue;
1917 }
1918 const BLOCK_SIZE uv_bsize = get_plane_block_size(
1919 bsize, xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
1920
1921 mi->mode = this_mode;
1922 mi->ref_frame[0] = INTRA_FRAME;
1923 mi->ref_frame[1] = NONE_FRAME;
1924
1925 av1_invalid_rd_stats(&this_rdc);
1926 args.mode = this_mode;
1927 args.skippable = 1;
1928 args.rdc = &this_rdc;
1929 mi->tx_size = intra_tx_size;
1930 compute_intra_yprediction(cm, this_mode, bsize, x, xd);
1931 // Look into selecting tx_size here, based on prediction residual.
1932 if (use_modeled_non_rd_cost)
1933 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc, 1);
1934 else
1935 block_yrd(cpi, x, mi_row, mi_col, &this_rdc, &args.skippable, bsize,
1936 mi->tx_size);
1937 // TODO(kyslov@) Need to account for skippable
1938 if (x->color_sensitivity[0]) {
1939 av1_foreach_transformed_block_in_plane(xd, uv_bsize, 1,
1940 estimate_block_intra, &args);
1941 }
1942 if (x->color_sensitivity[1]) {
1943 av1_foreach_transformed_block_in_plane(xd, uv_bsize, 2,
1944 estimate_block_intra, &args);
1945 }
1946
1947 int mode_cost = 0;
1948 if (av1_is_directional_mode(this_mode) && av1_use_angle_delta(bsize)) {
1949 mode_cost +=
1950 x->mode_costs.angle_delta_cost[this_mode - V_PRED]
1951 [MAX_ANGLE_DELTA +
1952 mi->angle_delta[PLANE_TYPE_Y]];
1953 }
1954 if (this_mode == DC_PRED && av1_filter_intra_allowed_bsize(cm, bsize)) {
1955 mode_cost += x->mode_costs.filter_intra_cost[bsize][0];
1956 }
1957 this_rdc.rate += ref_cost_intra;
1958 this_rdc.rate += intra_cost_penalty;
1959 this_rdc.rate += mode_cost;
1960 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
1961
1962 if (this_rdc.rdcost < best_rdc->rdcost) {
1963 *best_rdc = this_rdc;
1964 best_pickmode->best_mode = this_mode;
1965 best_pickmode->best_tx_size = mi->tx_size;
1966 best_pickmode->best_ref_frame = INTRA_FRAME;
1967 mi->uv_mode = this_mode;
1968 mi->mv[0].as_int = INVALID_MV;
1969 mi->mv[1].as_int = INVALID_MV;
1970 }
1971 }
1972 mi->tx_size = best_pickmode->best_tx_size;
1973 }
1974
1975 static AOM_INLINE int is_filter_search_enabled(const AV1_COMP *cpi, int mi_row,
1976 int mi_col, BLOCK_SIZE bsize) {
1977 const AV1_COMMON *const cm = &cpi->common;
1978 int enable_filter_search = 0;
1979
1980 if (cpi->sf.rt_sf.use_nonrd_filter_search) {
1981 enable_filter_search = 1;
1982 if (cpi->sf.interp_sf.cb_pred_filter_search) {
1983 const int bsl = mi_size_wide_log2[bsize];
1984 enable_filter_search =
1985 (((mi_row + mi_col) >> bsl) +
1986 get_chessboard_index(cm->current_frame.frame_number)) &
1987 0x1;
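      // For example (illustrative): for a 32x32 block bsl = 3, so the filter
      // search is enabled on alternating 32x32 block positions in a
      // chessboard pattern, offset by get_chessboard_index() so the pattern
      // varies from frame to frame.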
1988 }
1989 }
1990 return enable_filter_search;
1991 }
1992
1993 static AOM_INLINE int skip_mode_by_threshold(
1994 PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, int_mv mv,
1995 int frames_since_golden, const int *const rd_threshes,
1996 const int *const rd_thresh_freq_fact, int64_t best_cost, int best_skip,
1997 int extra_shift) {
1998 int skip_this_mode = 0;
1999 const THR_MODES mode_index = mode_idx[ref_frame][INTER_OFFSET(mode)];
2000 int64_t mode_rd_thresh =
2001 best_skip ? ((int64_t)rd_threshes[mode_index]) << (extra_shift + 1)
2002 : ((int64_t)rd_threshes[mode_index]) << extra_shift;
2003
  // Increase mode_rd_thresh value for non-LAST ref frames to improve encoding
  // speed.
2006 if (ref_frame != LAST_FRAME) {
2007 mode_rd_thresh = mode_rd_thresh << 1;
2008 if (ref_frame == GOLDEN_FRAME && frames_since_golden > 4)
2009 mode_rd_thresh = mode_rd_thresh << (extra_shift + 1);
2010 }
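  // For example (illustrative): with extra_shift = 0 and best_skip = 0, a
  // GOLDEN_FRAME mode with frames_since_golden > 4 is compared against
  // 4 * rd_threshes[mode_index] in the check below.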
2011
2012 if (rd_less_than_thresh(best_cost, mode_rd_thresh,
2013 rd_thresh_freq_fact[mode_index]))
2014 if (mv.as_int != 0) skip_this_mode = 1;
2015
2016 return skip_this_mode;
2017 }
2018
2019 static AOM_INLINE int skip_mode_by_low_temp(
2020 PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, BLOCK_SIZE bsize,
2021 CONTENT_STATE_SB content_state_sb, int_mv mv, int force_skip_low_temp_var) {
2022 // Skip non-zeromv mode search for non-LAST frame if force_skip_low_temp_var
2023 // is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
2024 // later.
2025 if (force_skip_low_temp_var && ref_frame != LAST_FRAME && mv.as_int != 0) {
2026 return 1;
2027 }
2028
2029 if (content_state_sb.source_sad != kHighSad && bsize >= BLOCK_64X64 &&
2030 force_skip_low_temp_var && mode == NEWMV) {
2031 return 1;
2032 }
2033 return 0;
2034 }
2035
2036 static AOM_INLINE int skip_mode_by_bsize_and_ref_frame(
2037 PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, BLOCK_SIZE bsize,
2038 int extra_prune, unsigned int sse_zeromv_norm, int more_prune) {
2039 const unsigned int thresh_skip_golden = 500;
2040
2041 if (ref_frame != LAST_FRAME && sse_zeromv_norm < thresh_skip_golden &&
2042 mode == NEWMV)
2043 return 1;
2044
2045 if (bsize == BLOCK_128X128 && mode == NEWMV) return 1;
2046
2047 // Skip testing non-LAST if this flag is set.
2048 if (extra_prune) {
2049 if (extra_prune > 1 && ref_frame != LAST_FRAME &&
2050 (bsize > BLOCK_64X64 || (bsize > BLOCK_16X16 && mode == NEWMV)))
2051 return 1;
2052
2053 if (ref_frame != LAST_FRAME && mode == NEARMV) return 1;
2054
2055 if (more_prune && bsize >= BLOCK_32X32 && mode == NEARMV) return 1;
2056 }
2057 return 0;
2058 }
2059
2060 void av1_nonrd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
2061 MACROBLOCK *x, RD_STATS *rd_cost,
2062 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
2063 AV1_COMMON *const cm = &cpi->common;
2064 SVC *const svc = &cpi->svc;
2065 MACROBLOCKD *const xd = &x->e_mbd;
2066 MB_MODE_INFO *const mi = xd->mi[0];
2067 struct macroblockd_plane *const pd = &xd->plane[0];
2068
2069 BEST_PICKMODE best_pickmode;
2070 #if COLLECT_PICK_MODE_STAT
2071 static mode_search_stat ms_stat;
2072 #endif
2073 MV_REFERENCE_FRAME ref_frame;
2074 int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES];
2075 uint8_t mode_checked[MB_MODE_COUNT][REF_FRAMES];
2076 struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE];
2077 RD_STATS this_rdc, best_rdc;
2078 const unsigned char segment_id = mi->segment_id;
2079 const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
2080 const int *const rd_thresh_freq_fact = x->thresh_freq_fact[bsize];
2081 const InterpFilter filter_ref = cm->features.interp_filter;
2082 int best_early_term = 0;
2083 unsigned int ref_costs_single[REF_FRAMES],
2084 ref_costs_comp[REF_FRAMES][REF_FRAMES];
2085 int force_skip_low_temp_var = 0;
2086 int use_ref_frame_mask[REF_FRAMES] = { 0 };
2087 unsigned int sse_zeromv_norm = UINT_MAX;
2088 // Use mode set that includes zeromv (via globalmv) for speed >= 9 for
2089 // content with low motion.
2090 int use_zeromv =
2091 ((cpi->oxcf.speed >= 9 && cpi->rc.avg_frame_low_motion > 70) ||
2092 cpi->sf.rt_sf.nonrd_agressive_skip);
2093 const int num_inter_modes =
2094 use_zeromv ? NUM_INTER_MODES_REDUCED : NUM_INTER_MODES_RT;
2095 const REF_MODE *const ref_mode_set =
2096 use_zeromv ? ref_mode_set_reduced : ref_mode_set_rt;
2097 PRED_BUFFER tmp[4];
2098 DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 128 * 128]);
2099 PRED_BUFFER *this_mode_pred = NULL;
2100 const int reuse_inter_pred = cpi->sf.rt_sf.reuse_inter_pred_nonrd &&
2101 cm->seq_params.bit_depth == AOM_BITS_8;
2102
2103 const int bh = block_size_high[bsize];
2104 const int bw = block_size_wide[bsize];
2105 const int pixels_in_block = bh * bw;
2106 struct buf_2d orig_dst = pd->dst;
2107 const CommonQuantParams *quant_params = &cm->quant_params;
2108 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
2109 TxfmSearchInfo *txfm_info = &x->txfm_search_info;
2110 #if COLLECT_PICK_MODE_STAT
2111 aom_usec_timer_start(&ms_stat.timer2);
2112 #endif
2113 const InterpFilter default_interp_filter = EIGHTTAP_REGULAR;
2114 int64_t thresh_sad_pred = INT64_MAX;
2115 const int mi_row = xd->mi_row;
2116 const int mi_col = xd->mi_col;
2117 int svc_mv_col = 0;
2118 int svc_mv_row = 0;
2119 int force_mv_inter_layer = 0;
2120 int use_modeled_non_rd_cost = 0;
2121 #if CONFIG_AV1_TEMPORAL_DENOISING
2122 const int denoise_recheck_zeromv = 1;
2123 AV1_PICKMODE_CTX_DEN ctx_den;
2124 int64_t zero_last_cost_orig = INT64_MAX;
2125 int denoise_svc_pickmode = 1;
2126 const int resize_pending =
2127 (cpi->resize_pending_params.width && cpi->resize_pending_params.height &&
2128 (cpi->common.width != cpi->resize_pending_params.width ||
2129 cpi->common.height != cpi->resize_pending_params.height));
2130
2131 #endif
2132
2133 init_best_pickmode(&best_pickmode);
2134
2135 const ModeCosts *mode_costs = &x->mode_costs;
2136
2137 estimate_single_ref_frame_costs(cm, xd, mode_costs, segment_id,
2138 ref_costs_single);
2139 if (cpi->sf.rt_sf.use_comp_ref_nonrd)
2140 estimate_comp_ref_frame_costs(cm, xd, mode_costs, segment_id,
2141 ref_costs_comp);
2142
2143 memset(&mode_checked[0][0], 0, MB_MODE_COUNT * REF_FRAMES);
2144 if (reuse_inter_pred) {
2145 for (int i = 0; i < 3; i++) {
2146 tmp[i].data = &pred_buf[pixels_in_block * i];
2147 tmp[i].stride = bw;
2148 tmp[i].in_use = 0;
2149 }
2150 tmp[3].data = pd->dst.buf;
2151 tmp[3].stride = pd->dst.stride;
2152 tmp[3].in_use = 0;
2153 }
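  // Note: tmp[0..2] point into the local pred_buf scratch memory, while
  // tmp[3] aliases the actual destination buffer, so the first mode evaluated
  // with reuse_inter_pred writes its prediction directly to the dst buffer.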
2154
2155 txfm_info->skip_txfm = 0;
2156
2157 // initialize mode decisions
2158 av1_invalid_rd_stats(&best_rdc);
2159 av1_invalid_rd_stats(&this_rdc);
2160 av1_invalid_rd_stats(rd_cost);
2161 mi->bsize = bsize;
2162 mi->ref_frame[0] = NONE_FRAME;
2163 mi->ref_frame[1] = NONE_FRAME;
2164
2165 #if CONFIG_AV1_TEMPORAL_DENOISING
2166 if (cpi->oxcf.noise_sensitivity > 0) {
2167 // if (cpi->use_svc) denoise_svc_pickmode = av1_denoise_svc_non_key(cpi);
2168 if (cpi->denoiser.denoising_level > kDenLowLow && denoise_svc_pickmode)
2169 av1_denoiser_reset_frame_stats(ctx);
2170 }
2171 #endif
2172
2173 const int gf_temporal_ref = is_same_gf_and_last_scale(cm);
2174
  // If the lower spatial layer uses an averaging filter for downsampling
  // (phase = 8), the target decimated pixel is shifted by (1/2, 1/2) relative
  // to the source, so use a subpel motion vector to compensate. The nonzero
  // motion is half a pixel shifted to the left and top, i.e. (-4, -4). This
  // has more effect at higher resolutions, so condition it on that for now.
2180 if (cpi->use_svc && svc->spatial_layer_id > 0 &&
2181 svc->downsample_filter_phase[svc->spatial_layer_id - 1] == 8 &&
2182 cm->width * cm->height > 640 * 480) {
2183 svc_mv_col = -4;
2184 svc_mv_row = -4;
2185 }
2186
2187 get_ref_frame_use_mask(cpi, x, mi, mi_row, mi_col, bsize, gf_temporal_ref,
2188 use_ref_frame_mask, &force_skip_low_temp_var);
2189
2190 for (MV_REFERENCE_FRAME ref_frame_iter = LAST_FRAME;
2191 ref_frame_iter <= ALTREF_FRAME; ++ref_frame_iter) {
2192 if (use_ref_frame_mask[ref_frame_iter]) {
2193 find_predictors(cpi, x, ref_frame_iter, frame_mv, tile_data, yv12_mb,
2194 bsize, force_skip_low_temp_var);
2195 }
2196 }
2197
2198 thresh_sad_pred = ((int64_t)x->pred_mv_sad[LAST_FRAME]) << 1;
  // Increase threshold for less aggressive pruning.
2200 if (cpi->sf.rt_sf.nonrd_prune_ref_frame_search == 1)
2201 thresh_sad_pred += (x->pred_mv_sad[LAST_FRAME] >> 2);
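  // For example (illustrative): with nonrd_prune_ref_frame_search == 1,
  // GOLDEN/ALTREF references whose pred_mv_sad exceeds 2.25x the LAST_FRAME
  // pred_mv_sad are pruned in the mode loop below; higher settings use the
  // tighter 2x bound.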
2202
2203 const int large_block = bsize >= BLOCK_32X32;
2204 const int use_model_yrd_large =
2205 cpi->oxcf.rc_cfg.mode == AOM_CBR && large_block &&
2206 !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
2207 quant_params->base_qindex && cm->seq_params.bit_depth == 8;
2208
2209 const int enable_filter_search =
2210 is_filter_search_enabled(cpi, mi_row, mi_col, bsize);
2211
  // TODO(marpan): Look into reducing these conditions. For now, constrain
  // it to avoid a significant bdrate loss.
2214 if (cpi->sf.rt_sf.use_modeled_non_rd_cost) {
2215 if (cpi->svc.non_reference_frame)
2216 use_modeled_non_rd_cost = 1;
2217 else if (cpi->svc.number_temporal_layers > 1 &&
2218 cpi->svc.temporal_layer_id == 0)
2219 use_modeled_non_rd_cost = 0;
2220 else
2221 use_modeled_non_rd_cost =
2222 (quant_params->base_qindex > 120 && x->source_variance > 100 &&
2223 bsize <= BLOCK_16X16 && !x->content_state_sb.lighting_change &&
2224 x->content_state_sb.source_sad != kHighSad);
2225 }
2226
2227 #if COLLECT_PICK_MODE_STAT
2228 ms_stat.num_blocks[bsize]++;
2229 #endif
2230 init_mbmi(mi, DC_PRED, NONE_FRAME, NONE_FRAME, cm);
2231 mi->tx_size = AOMMIN(
2232 AOMMIN(max_txsize_lookup[bsize],
2233 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]),
2234 TX_16X16);
2235
2236 for (int idx = 0; idx < num_inter_modes; ++idx) {
2237 const struct segmentation *const seg = &cm->seg;
2238
2239 int rate_mv = 0;
2240 int is_skippable;
2241 int this_early_term = 0;
2242 int skip_this_mv = 0;
2243 PREDICTION_MODE this_mode;
2244 MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
2245 RD_STATS nonskip_rdc;
2246 av1_invalid_rd_stats(&nonskip_rdc);
2247
2248 this_mode = ref_mode_set[idx].pred_mode;
2249 ref_frame = ref_mode_set[idx].ref_frame;
2250
2251 #if COLLECT_PICK_MODE_STAT
2252 aom_usec_timer_start(&ms_stat.timer1);
2253 ms_stat.num_searches[bsize][this_mode]++;
2254 #endif
2255 mi->mode = this_mode;
2256 mi->ref_frame[0] = ref_frame;
2257
2258 if (!use_ref_frame_mask[ref_frame]) continue;
2259
2260 force_mv_inter_layer = 0;
2261 if (cpi->use_svc && svc->spatial_layer_id > 0 &&
2262 ((ref_frame == LAST_FRAME && svc->skip_mvsearch_last) ||
2263 (ref_frame == GOLDEN_FRAME && svc->skip_mvsearch_gf))) {
      // Only test the mode if NEARESTMV/NEARMV is (svc_mv_col, svc_mv_row);
      // otherwise set NEWMV to (svc_mv_col, svc_mv_row).
      // Skip the NEWMV motion search and the filter search.
2267 force_mv_inter_layer = 1;
2268 if (this_mode == NEWMV) {
2269 frame_mv[this_mode][ref_frame].as_mv.col = svc_mv_col;
2270 frame_mv[this_mode][ref_frame].as_mv.row = svc_mv_row;
2271 } else if (frame_mv[this_mode][ref_frame].as_mv.col != svc_mv_col ||
2272 frame_mv[this_mode][ref_frame].as_mv.row != svc_mv_row) {
2273 continue;
2274 }
2275 }
2276
2277 // If the segment reference frame feature is enabled then do nothing if the
2278 // current ref frame is not allowed.
2279 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
2280 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
2281 continue;
2282
2283 if (skip_mode_by_bsize_and_ref_frame(
2284 this_mode, ref_frame, bsize, x->nonrd_prune_ref_frame_search,
2285 sse_zeromv_norm, cpi->sf.rt_sf.nonrd_agressive_skip))
2286 continue;
2287
2288 if (skip_mode_by_low_temp(this_mode, ref_frame, bsize, x->content_state_sb,
2289 frame_mv[this_mode][ref_frame],
2290 force_skip_low_temp_var))
2291 continue;
2292
2293 // Disable this drop out case if the ref frame segment level feature is
2294 // enabled for this segment. This is to prevent the possibility that we
2295 // end up unable to pick any mode.
2296 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
      // Check for skipping GOLDEN and ALTREF based on pred_mv_sad.
2298 if (cpi->sf.rt_sf.nonrd_prune_ref_frame_search > 0 &&
2299 x->pred_mv_sad[ref_frame] != INT_MAX && ref_frame != LAST_FRAME) {
2300 if ((int64_t)(x->pred_mv_sad[ref_frame]) > thresh_sad_pred) continue;
2301 }
2302 }
2303
2304 if (skip_mode_by_threshold(
2305 this_mode, ref_frame, frame_mv[this_mode][ref_frame],
2306 cpi->rc.frames_since_golden, rd_threshes, rd_thresh_freq_fact,
2307 best_rdc.rdcost, best_pickmode.best_mode_skip_txfm,
2308 (cpi->sf.rt_sf.nonrd_agressive_skip ? 1 : 0)))
2309 continue;
2310
2311 // Select prediction reference frames.
2312 for (int i = 0; i < MAX_MB_PLANE; i++) {
2313 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
2314 }
2315
2316 mi->ref_frame[0] = ref_frame;
2317 mi->ref_frame[1] = NONE_FRAME;
2318 set_ref_ptrs(cm, xd, ref_frame, NONE_FRAME);
2319
2320 if (this_mode == NEWMV && !force_mv_inter_layer) {
2321 if (search_new_mv(cpi, x, frame_mv, ref_frame, gf_temporal_ref, bsize,
2322 mi_row, mi_col, &rate_mv, &best_rdc))
2323 continue;
2324 }
2325
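    // Skip this mode if its motion vector duplicates one already evaluated
    // for the same reference frame under a different inter mode (e.g. NEARMV
    // resolving to the same MV as NEARESTMV).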
2326 for (PREDICTION_MODE inter_mv_mode = NEARESTMV; inter_mv_mode <= NEWMV;
2327 inter_mv_mode++) {
2328 if (inter_mv_mode == this_mode) continue;
2329 if (mode_checked[inter_mv_mode][ref_frame] &&
2330 frame_mv[this_mode][ref_frame].as_int ==
2331 frame_mv[inter_mv_mode][ref_frame].as_int) {
2332 skip_this_mv = 1;
2333 break;
2334 }
2335 }
2336
2337 if (skip_this_mv) continue;
2338
2339 mi->mode = this_mode;
2340 mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
2341 mi->mv[1].as_int = 0;
2342 if (reuse_inter_pred) {
2343 if (!this_mode_pred) {
2344 this_mode_pred = &tmp[3];
2345 } else {
2346 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
2347 pd->dst.buf = this_mode_pred->data;
2348 pd->dst.stride = bw;
2349 }
2350 }
2351 #if COLLECT_PICK_MODE_STAT
2352 ms_stat.num_nonskipped_searches[bsize][this_mode]++;
2353 #endif
2354 if (enable_filter_search && !force_mv_inter_layer &&
2355 ((mi->mv[0].as_mv.row & 0x07) || (mi->mv[0].as_mv.col & 0x07)) &&
2356 (ref_frame == LAST_FRAME || !x->nonrd_prune_ref_frame_search)) {
2357 search_filter_ref(cpi, x, &this_rdc, mi_row, mi_col, tmp, bsize,
2358 reuse_inter_pred, &this_mode_pred, &this_early_term,
2359 use_model_yrd_large);
2360 } else {
2361 mi->interp_filters =
2362 (filter_ref == SWITCHABLE)
2363 ? av1_broadcast_interp_filter(default_interp_filter)
2364 : av1_broadcast_interp_filter(filter_ref);
2365 if (force_mv_inter_layer)
2366 mi->interp_filters = av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
2367
2368 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
2369 if (use_model_yrd_large) {
2370 model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd, &this_rdc,
2371 &this_early_term, use_modeled_non_rd_cost);
2372 } else {
2373 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc,
2374 use_modeled_non_rd_cost);
2375 }
2376 }
2377
2378 if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0) {
2379 sse_zeromv_norm =
2380 (unsigned int)(this_rdc.sse >> (b_width_log2_lookup[bsize] +
2381 b_height_log2_lookup[bsize]));
2382 }
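    // sse_zeromv_norm is the zero-mv SSE normalized per 4x4 sub-block (e.g.
    // for a 32x32 block the shift is 3 + 3 = 6, i.e. an average over 64 4x4
    // blocks); it feeds the non-LAST NEWMV pruning in
    // skip_mode_by_bsize_and_ref_frame().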
2383
2384 const int skip_ctx = av1_get_skip_txfm_context(xd);
2385 const int skip_txfm_cost = mode_costs->skip_txfm_cost[skip_ctx][1];
2386 const int no_skip_txfm_cost = mode_costs->skip_txfm_cost[skip_ctx][0];
2387 const int64_t sse_y = this_rdc.sse;
2388 if (this_early_term) {
2389 this_rdc.skip_txfm = 1;
2390 this_rdc.rate = skip_txfm_cost;
2391 this_rdc.dist = this_rdc.sse << 4;
2392 } else {
2393 if (use_modeled_non_rd_cost) {
2394 if (this_rdc.skip_txfm) {
2395 this_rdc.rate = skip_txfm_cost;
2396 } else {
2397 this_rdc.rate += no_skip_txfm_cost;
2398 }
2399 } else {
2400 block_yrd(cpi, x, mi_row, mi_col, &this_rdc, &is_skippable, bsize,
2401 mi->tx_size);
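        // Prefer the skip-txfm path when coding the residual is no cheaper in
        // RD terms than skipping the transform (rate 0, distortion = sse).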
2402 if (this_rdc.skip_txfm ||
2403 RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist) >=
2404 RDCOST(x->rdmult, 0, this_rdc.sse)) {
2405 if (!this_rdc.skip_txfm) {
            // Need to store the "real" rdc for possible future use if the UV
            // rdc disallows tx skip.
2408 nonskip_rdc = this_rdc;
2409 nonskip_rdc.rate += no_skip_txfm_cost;
2410 }
2411 this_rdc.rate = skip_txfm_cost;
2412 this_rdc.skip_txfm = 1;
2413 this_rdc.dist = this_rdc.sse;
2414 } else {
2415 this_rdc.rate += no_skip_txfm_cost;
2416 }
2417 }
2418 if ((x->color_sensitivity[0] || x->color_sensitivity[1])) {
2419 RD_STATS rdc_uv;
2420 const BLOCK_SIZE uv_bsize = get_plane_block_size(
2421 bsize, xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
2422 if (x->color_sensitivity[0]) {
2423 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
2424 AOM_PLANE_U, AOM_PLANE_U);
2425 }
2426 if (x->color_sensitivity[1]) {
2427 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
2428 AOM_PLANE_V, AOM_PLANE_V);
2429 }
2430 model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &this_rdc.sse, 1, 2);
2431 // Restore Y rdc if UV rdc disallows txfm skip
2432 if (this_rdc.skip_txfm && !rdc_uv.skip_txfm &&
2433 nonskip_rdc.rate != INT_MAX)
2434 this_rdc = nonskip_rdc;
2435 this_rdc.rate += rdc_uv.rate;
2436 this_rdc.dist += rdc_uv.dist;
2437 this_rdc.skip_txfm = this_rdc.skip_txfm && rdc_uv.skip_txfm;
2438 }
2439 }
2440
2441 // TODO(kyslov) account for UV prediction cost
2442 this_rdc.rate += rate_mv;
2443 const int16_t mode_ctx =
2444 av1_mode_context_analyzer(mbmi_ext->mode_context, mi->ref_frame);
2445 this_rdc.rate += cost_mv_ref(mode_costs, this_mode, mode_ctx);
2446
2447 this_rdc.rate += ref_costs_single[ref_frame];
2448
2449 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
2450 if (cpi->oxcf.rc_cfg.mode == AOM_CBR) {
2451 newmv_diff_bias(xd, this_mode, &this_rdc, bsize,
2452 frame_mv[this_mode][ref_frame].as_mv.row,
2453 frame_mv[this_mode][ref_frame].as_mv.col, cpi->speed,
2454 x->source_variance, x->content_state_sb);
2455 }
2456 #if CONFIG_AV1_TEMPORAL_DENOISING
2457 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc_pickmode &&
2458 cpi->denoiser.denoising_level > kDenLowLow) {
2459 av1_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx);
2460 // Keep track of zero_last cost.
2461 if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0)
2462 zero_last_cost_orig = this_rdc.rdcost;
2463 }
2464 #else
2465 (void)sse_y;
2466 #endif
2467
2468 mode_checked[this_mode][ref_frame] = 1;
2469 #if COLLECT_PICK_MODE_STAT
2470 aom_usec_timer_mark(&ms_stat.timer1);
2471 ms_stat.nonskipped_search_times[bsize][this_mode] +=
2472 aom_usec_timer_elapsed(&ms_stat.timer1);
2473 #endif
2474 if (this_rdc.rdcost < best_rdc.rdcost) {
2475 best_rdc = this_rdc;
2476 best_early_term = this_early_term;
2477 best_pickmode.best_mode = this_mode;
2478 best_pickmode.best_pred_filter = mi->interp_filters;
2479 best_pickmode.best_tx_size = mi->tx_size;
2480 best_pickmode.best_ref_frame = ref_frame;
2481 best_pickmode.best_mode_skip_txfm = this_rdc.skip_txfm;
2482 best_pickmode.best_mode_initial_skip_flag =
2483 (nonskip_rdc.rate == INT_MAX && this_rdc.skip_txfm);
2484
2485 if (reuse_inter_pred) {
2486 free_pred_buffer(best_pickmode.best_pred);
2487 best_pickmode.best_pred = this_mode_pred;
2488 }
2489 } else {
2490 if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
2491 }
2492 if (best_early_term && (idx > 0 || cpi->sf.rt_sf.nonrd_agressive_skip)) {
2493 txfm_info->skip_txfm = 1;
2494 break;
2495 }
2496 }
2497
2498 mi->mode = best_pickmode.best_mode;
2499 mi->interp_filters = best_pickmode.best_pred_filter;
2500 mi->tx_size = best_pickmode.best_tx_size;
2501 memset(mi->inter_tx_size, mi->tx_size, sizeof(mi->inter_tx_size));
2502 mi->ref_frame[0] = best_pickmode.best_ref_frame;
2503 mi->mv[0].as_int =
2504 frame_mv[best_pickmode.best_mode][best_pickmode.best_ref_frame].as_int;
2505
  // Perform intra prediction search if the best SAD is above a certain
  // threshold.
2508 mi->angle_delta[PLANE_TYPE_Y] = 0;
2509 mi->angle_delta[PLANE_TYPE_UV] = 0;
2510 mi->filter_intra_mode_info.use_filter_intra = 0;
2511
2512 estimate_intra_mode(cpi, x, bsize, use_modeled_non_rd_cost, best_early_term,
2513 ref_costs_single[INTRA_FRAME], reuse_inter_pred,
2514 &orig_dst, tmp, &this_mode_pred, &best_rdc,
2515 &best_pickmode);
2516
2517 pd->dst = orig_dst;
2518 mi->mode = best_pickmode.best_mode;
2519 mi->ref_frame[0] = best_pickmode.best_ref_frame;
2520 txfm_info->skip_txfm = best_rdc.skip_txfm;
2521
2522 if (!is_inter_block(mi)) {
2523 mi->interp_filters = av1_broadcast_interp_filter(SWITCHABLE_FILTERS);
2524 }
2525
2526 if (reuse_inter_pred && best_pickmode.best_pred != NULL) {
2527 PRED_BUFFER *const best_pred = best_pickmode.best_pred;
2528 if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
2529 aom_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
2530 pd->dst.stride, bw, bh);
2531 }
2532 }
2533
2534 #if CONFIG_AV1_TEMPORAL_DENOISING
2535 if (cpi->oxcf.noise_sensitivity > 0 && resize_pending == 0 &&
2536 denoise_svc_pickmode && cpi->denoiser.denoising_level > kDenLowLow &&
2537 cpi->denoiser.reset == 0) {
2538 AV1_DENOISER_DECISION decision = COPY_BLOCK;
2539 ctx->sb_skip_denoising = 0;
2540 av1_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_costs_single,
2541 frame_mv, reuse_inter_pred, &best_pickmode);
2542 av1_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision,
2543 gf_temporal_ref);
2544 if (denoise_recheck_zeromv)
2545 recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den,
2546 yv12_mb, &best_rdc, &best_pickmode, bsize,
2547 mi_row, mi_col);
2548 best_pickmode.best_ref_frame = ctx_den.best_ref_frame;
2549 }
2550 #endif
2551
2552 if (cpi->sf.inter_sf.adaptive_rd_thresh) {
2553 THR_MODES best_mode_idx =
2554 mode_idx[best_pickmode.best_ref_frame][mode_offset(mi->mode)];
2555 if (best_pickmode.best_ref_frame == INTRA_FRAME) {
2556 // Only consider the modes that are included in the intra_mode_list.
2557 int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
2558 for (int i = 0; i < intra_modes; i++) {
2559 update_thresh_freq_fact(cpi, x, bsize, INTRA_FRAME, best_mode_idx,
2560 intra_mode_list[i]);
2561 }
2562 } else {
2563 PREDICTION_MODE this_mode;
2564 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
2565 update_thresh_freq_fact(cpi, x, bsize, best_pickmode.best_ref_frame,
2566 best_mode_idx, this_mode);
2567 }
2568 }
2569 }
2570
2571 #if CONFIG_INTERNAL_STATS
2572 store_coding_context(x, ctx, mi->mode);
2573 #else
2574 store_coding_context(x, ctx);
2575 #endif // CONFIG_INTERNAL_STATS
2576 #if COLLECT_PICK_MODE_STAT
2577 aom_usec_timer_mark(&ms_stat.timer2);
2578 ms_stat.avg_block_times[bsize] += aom_usec_timer_elapsed(&ms_stat.timer2);
2579 //
2580 if ((mi_row + mi_size_high[bsize] >= (cpi->common.mi_params.mi_rows)) &&
2581 (mi_col + mi_size_wide[bsize] >= (cpi->common.mi_params.mi_cols))) {
2582 int i, j;
2583 PREDICTION_MODE used_modes[3] = { NEARESTMV, NEARMV, NEWMV };
2584 BLOCK_SIZE bss[5] = { BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
2585 BLOCK_128X128 };
2586 int64_t total_time = 0l;
2587 int32_t total_blocks = 0;
2588
2589 printf("\n");
2590 for (i = 0; i < 5; i++) {
2591 printf("BS(%d) Num %d, Avg_time %f: ", bss[i], ms_stat.num_blocks[bss[i]],
2592 ms_stat.num_blocks[bss[i]] > 0
2593 ? (float)ms_stat.avg_block_times[bss[i]] /
2594 ms_stat.num_blocks[bss[i]]
2595 : 0);
2596 total_time += ms_stat.avg_block_times[bss[i]];
2597 total_blocks += ms_stat.num_blocks[bss[i]];
2598 for (j = 0; j < 3; j++) {
2599 printf("Mode %d, %d/%d tps %f ", used_modes[j],
2600 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]],
2601 ms_stat.num_searches[bss[i]][used_modes[j]],
2602 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]] > 0
2603 ? (float)ms_stat
2604 .nonskipped_search_times[bss[i]][used_modes[j]] /
2605 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]]
2606 : 0l);
2607 }
2608 printf("\n");
2609 }
2610 printf("Total time = %ld. Total blocks = %d\n", total_time, total_blocks);
2611 }
2612 //
2613 #endif // COLLECT_PICK_MODE_STAT
2614 *rd_cost = best_rdc;
2615 }
2616