1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
11 */
12
13 #include <assert.h>
14 #include <limits.h>
15 #include <math.h>
16 #include <stdio.h>
17
18 #include "config/aom_dsp_rtcd.h"
19 #include "config/av1_rtcd.h"
20
21 #include "aom_dsp/aom_dsp_common.h"
22 #include "aom_dsp/blend.h"
23 #include "aom_mem/aom_mem.h"
24 #include "aom_ports/aom_timer.h"
25 #include "aom_ports/mem.h"
26 #include "aom_ports/system_state.h"
27
28 #include "av1/encoder/model_rd.h"
29 #include "av1/common/mvref_common.h"
30 #include "av1/common/pred_common.h"
31 #include "av1/common/reconinter.h"
32 #include "av1/common/reconintra.h"
33
34 #include "av1/encoder/encodemv.h"
35 #include "av1/encoder/rdopt.h"
36 #include "av1/encoder/reconinter_enc.h"
37
38 extern int g_pick_inter_mode_cnt;
39 /*!\cond */
40 typedef struct {
41 uint8_t *data;
42 int stride;
43 int in_use;
44 } PRED_BUFFER;
45
46 typedef struct {
47 PRED_BUFFER *best_pred;
48 PREDICTION_MODE best_mode;
49 TX_SIZE best_tx_size;
50 MV_REFERENCE_FRAME best_ref_frame;
51 uint8_t best_mode_skip_txfm;
52 uint8_t best_mode_initial_skip_flag;
53 int_interpfilters best_pred_filter;
54 } BEST_PICKMODE;
55
56 typedef struct {
57 MV_REFERENCE_FRAME ref_frame;
58 PREDICTION_MODE pred_mode;
59 } REF_MODE;
60 /*!\endcond */
61
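// Maps the (row, col) of a 16x16 block inside a 64x64 superblock to its index
// in the variance_low[] array (see get_force_skip_low_temp_var_small_sb()).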
62 static const int pos_shift_16x16[4][4] = {
63 { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
64 };
65
66 #define RT_INTER_MODES 9
67 static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
68 { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV },
69 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
70 { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
71 { ALTREF_FRAME, NEARESTMV }, { ALTREF_FRAME, NEARMV },
72 { ALTREF_FRAME, NEWMV }
73 };
74
75 static const THR_MODES mode_idx[REF_FRAMES][4] = {
76 { THR_DC, THR_V_PRED, THR_H_PRED, THR_SMOOTH },
77 { THR_NEARESTMV, THR_NEARMV, THR_GLOBALMV, THR_NEWMV },
78 { THR_NEARESTL2, THR_NEARL2, THR_GLOBALL2, THR_NEWL2 },
79 { THR_NEARESTL3, THR_NEARL3, THR_GLOBALL3, THR_NEWL3 },
80 { THR_NEARESTG, THR_NEARG, THR_GLOBALMV, THR_NEWG },
81 };
82
83 static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
84 SMOOTH_PRED };
85
86 static INLINE int mode_offset(const PREDICTION_MODE mode) {
87 if (mode >= NEARESTMV) {
88 return INTER_OFFSET(mode);
89 } else {
90 switch (mode) {
91 case DC_PRED: return 0;
92 case V_PRED: return 1;
93 case H_PRED: return 2;
94 case SMOOTH_PRED: return 3;
95 default: assert(0); return -1;
96 }
97 }
98 }
99
100 enum {
101 // INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV),
102 INTER_NEAREST = (1 << NEARESTMV),
103 INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV),
104 INTER_NEAREST_NEAR = (1 << NEARESTMV) | (1 << NEARMV),
105 INTER_NEAR_NEW = (1 << NEARMV) | (1 << NEWMV),
106 };
107
108 static INLINE void init_best_pickmode(BEST_PICKMODE *bp) {
109 bp->best_mode = NEARESTMV;
110 bp->best_ref_frame = LAST_FRAME;
111 bp->best_tx_size = TX_8X8;
112 bp->best_pred_filter = av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
113 bp->best_mode_skip_txfm = 0;
114 bp->best_mode_initial_skip_flag = 0;
115 bp->best_pred = NULL;
116 }
117
118 /*!\brief Runs Motion Estimation for a specific block and specific ref frame.
119 *
120 * \ingroup nonrd_mode_search
121 * \callgraph
122 * \callergraph
123 * Finds the best Motion Vector by running Motion Estimation for a specific
124 * block and a specific reference frame. Exits early if RDCost of Full Pel part
125 * exceeds the best RD Cost found so far
126 * \param[in] cpi Top-level encoder structure
127 * \param[in] x Pointer to structure holding all the
128 * data for the current macroblock
129 * \param[in] bsize Current block size
130 * \param[in] mi_row Row index in 4x4 units
131 * \param[in] mi_col Column index in 4x4 units
132 * \param[in] tmp_mv Pointer to best found New MV
133 * \param[in] rate_mv Pointer to Rate of the best new MV
134 * \param[in] best_rd_sofar RD Cost of the best mode found so far
135 * \param[in] use_base_mv Flag, indicating that tmp_mv holds
136 * specific MV to start the search with
137 *
138 * \return Returns 0 if ME was terminated after Full Pel Search because the
139 * RD Cost was too high. Otherwise returns 1. Best New MV is placed into \c tmp_mv.
140 * Rate estimation for this vector is placed to \c rate_mv
141 */
142 static int combined_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
143 BLOCK_SIZE bsize, int mi_row, int mi_col,
144 int_mv *tmp_mv, int *rate_mv,
145 int64_t best_rd_sofar, int use_base_mv) {
146 MACROBLOCKD *xd = &x->e_mbd;
147 const AV1_COMMON *cm = &cpi->common;
148 const int num_planes = av1_num_planes(cm);
149 MB_MODE_INFO *mi = xd->mi[0];
150 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
151 int step_param = (cpi->sf.rt_sf.fullpel_search_step_param)
152 ? cpi->sf.rt_sf.fullpel_search_step_param
153 : cpi->mv_search_params.mv_step_param;
154 FULLPEL_MV start_mv;
155 const int ref = mi->ref_frame[0];
156 const MV ref_mv = av1_get_ref_mv(x, mi->ref_mv_idx).as_mv;
157 MV center_mv;
158 int dis;
159 int rv = 0;
160 int cost_list[5];
161 int search_subpel = 1;
162 const YV12_BUFFER_CONFIG *scaled_ref_frame =
163 av1_get_scaled_ref_frame(cpi, ref);
164
165 if (scaled_ref_frame) {
166 int i;
167 // Swap out the reference frame for a version that's been scaled to
168 // match the resolution of the current frame, allowing the existing
169 // motion search code to be used without additional modifications.
170 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
171 av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL,
172 num_planes);
173 }
174
175 start_mv = get_fullmv_from_mv(&ref_mv);
176
177 if (!use_base_mv)
178 center_mv = ref_mv;
179 else
180 center_mv = tmp_mv->as_mv;
181 const search_site_config *src_search_sites =
182 cpi->mv_search_params.search_site_cfg[SS_CFG_SRC];
183 FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
184 av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize, &center_mv,
185 src_search_sites,
186 /*fine_search_interval=*/0);
187
188 av1_full_pixel_search(start_mv, &full_ms_params, step_param,
189 cond_cost_list(cpi, cost_list), &tmp_mv->as_fullmv,
190 NULL);
191
192 // Calculate the bit cost of the motion vector.
193 MV mvp_full = get_mv_from_fullmv(&tmp_mv->as_fullmv);
194
195 *rate_mv = av1_mv_bit_cost(&mvp_full, &ref_mv, x->mv_costs.nmv_joint_cost,
196 x->mv_costs.mv_cost_stack, MV_COST_WEIGHT);
197
198 // TODO(kyslov) Account for Rate Mode!
199 rv = !(RDCOST(x->rdmult, (*rate_mv), 0) > best_rd_sofar);
200
201 if (rv && search_subpel) {
202 SUBPEL_MOTION_SEARCH_PARAMS ms_params;
203 av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
204 cost_list);
205 MV subpel_start_mv = get_mv_from_fullmv(&tmp_mv->as_fullmv);
206 cpi->mv_search_params.find_fractional_mv_step(
207 xd, cm, &ms_params, subpel_start_mv, &tmp_mv->as_mv, &dis,
208 &x->pred_sse[ref], NULL);
209
210 *rate_mv =
211 av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->mv_costs.nmv_joint_cost,
212 x->mv_costs.mv_cost_stack, MV_COST_WEIGHT);
213 }
214
215 if (scaled_ref_frame) {
216 int i;
217 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
218 }
219 return rv;
220 }
221
222 /*!\brief Searches for the best New Motion Vector.
223 *
224 * \ingroup nonrd_mode_search
225 * \callgraph
226 * \callergraph
227 * Finds the best Motion Vector by doing Motion Estimation. Uses reduced
228 * complexity ME for non-LAST frames or calls \c combined_motion_search
229 * for LAST reference frame
230 * \param[in] cpi Top-level encoder structure
231 * \param[in] x Pointer to structure holding all the
232 * data for the current macroblock
233 * \param[in] frame_mv Array that holds MVs for all modes
234 * and ref frames
235 * \param[in] ref_frame Reference frame for which to find
236 * the best New MVs
237 * \param[in] gf_temporal_ref Flag, indicating temporal reference
238 * for GOLDEN frame
239 * \param[in] bsize Current block size
240 * \param[in] mi_row Row index in 4x4 units
241 * \param[in] mi_col Column index in 4x4 units
242 * \param[in] rate_mv Pointer to Rate of the best new MV
243 * \param[in] best_rdc Pointer to the RD Cost for the best
244 * mode found so far
245 *
246 * \return Returns -1 if the search was not done, otherwise returns 0.
247 * Best New MV is placed into \c frame_mv array, Rate estimation for this
248 * vector is placed to \c rate_mv
249 */
250 static int search_new_mv(AV1_COMP *cpi, MACROBLOCK *x,
251 int_mv frame_mv[][REF_FRAMES],
252 MV_REFERENCE_FRAME ref_frame, int gf_temporal_ref,
253 BLOCK_SIZE bsize, int mi_row, int mi_col, int *rate_mv,
254 RD_STATS *best_rdc) {
255 MACROBLOCKD *const xd = &x->e_mbd;
256 MB_MODE_INFO *const mi = xd->mi[0];
257 AV1_COMMON *cm = &cpi->common;
258 if (ref_frame > LAST_FRAME && cpi->oxcf.rc_cfg.mode == AOM_CBR &&
259 gf_temporal_ref) {
260 int tmp_sad;
261 int dis;
262 int cost_list[5] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX };
263
264 if (bsize < BLOCK_16X16) return -1;
265
266 tmp_sad = av1_int_pro_motion_estimation(
267 cpi, x, bsize, mi_row, mi_col,
268 &x->mbmi_ext.ref_mv_stack[ref_frame][0].this_mv.as_mv);
269
270 if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) return -1;
271
272 frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
273 int_mv best_mv = mi->mv[0];
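    // The stored MV is in 1/8-pel units; convert to full-pel to seed the
    // fractional-MV refinement below.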
274 best_mv.as_mv.row >>= 3;
275 best_mv.as_mv.col >>= 3;
276 MV ref_mv = av1_get_ref_mv(x, 0).as_mv;
277
278 *rate_mv = av1_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv, &ref_mv,
279 x->mv_costs.nmv_joint_cost,
280 x->mv_costs.mv_cost_stack, MV_COST_WEIGHT);
281 frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
282 frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
283
284 SUBPEL_MOTION_SEARCH_PARAMS ms_params;
285 av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize, &ref_mv,
286 cost_list);
287 MV start_mv = get_mv_from_fullmv(&best_mv.as_fullmv);
288 cpi->mv_search_params.find_fractional_mv_step(
289 xd, cm, &ms_params, start_mv, &best_mv.as_mv, &dis,
290 &x->pred_sse[ref_frame], NULL);
291 frame_mv[NEWMV][ref_frame].as_int = best_mv.as_int;
292 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
293 &frame_mv[NEWMV][ref_frame], rate_mv,
294 best_rdc->rdcost, 0)) {
295 return -1;
296 }
297
298 return 0;
299 }
300
301 /*!\brief Finds predicted motion vectors for a block.
302 *
303 * \ingroup nonrd_mode_search
304 * \callgraph
305 * \callergraph
306 * Finds predicted motion vectors for a block from a certain reference frame.
307 * First, it fills the reference MV stack, then picks the best MVs from the
308 * stack and predicts the final MV for a block for each mode.
309 * \param[in] cpi Top-level encoder structure
310 * \param[in] x Pointer to structure holding all the
311 * data for the current macroblock
312 * \param[in] ref_frame Reference frame for which to find
313 * ref MVs
314 * \param[in] frame_mv Predicted MVs for a block
315 * \param[in] tile_data Pointer to struct holding adaptive
316 * data/contexts/models for the tile
317 * during encoding
318 * \param[in] yv12_mb Buffer to hold predicted block
319 * \param[in] bsize Current block size
320 * \param[in] force_skip_low_temp_var Flag indicating possible mode search
321 * prune for low temporal variance block
322 *
323 * \return Nothing is returned. Instead, predicted MVs are placed into
324 * \c frame_mv array
325 */
326 static INLINE void find_predictors(AV1_COMP *cpi, MACROBLOCK *x,
327 MV_REFERENCE_FRAME ref_frame,
328 int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES],
329 TileDataEnc *tile_data,
330 struct buf_2d yv12_mb[8][MAX_MB_PLANE],
331 BLOCK_SIZE bsize,
332 int force_skip_low_temp_var) {
333 AV1_COMMON *const cm = &cpi->common;
334 MACROBLOCKD *const xd = &x->e_mbd;
335 MB_MODE_INFO *const mbmi = xd->mi[0];
336 MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
337 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref_frame);
338 const int num_planes = av1_num_planes(cm);
339 (void)tile_data;
340
341 x->pred_mv_sad[ref_frame] = INT_MAX;
342 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
343 // TODO(kyslov) this needs various further optimizations. to be continued..
344 assert(yv12 != NULL);
345 if (yv12 != NULL) {
346 const struct scale_factors *const sf =
347 get_ref_scale_factors_const(cm, ref_frame);
348 av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
349 av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
350 xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
351 mbmi_ext->mode_context);
352 // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
353 // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
354 av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
355 av1_find_best_ref_mvs_from_stack(
356 cm->features.allow_high_precision_mv, mbmi_ext, ref_frame,
357 &frame_mv[NEARESTMV][ref_frame], &frame_mv[NEARMV][ref_frame], 0);
358 // Early exit for non-LAST frame if force_skip_low_temp_var is set.
359 if (!av1_is_scaled(sf) && bsize >= BLOCK_8X8 &&
360 !(force_skip_low_temp_var && ref_frame != LAST_FRAME)) {
361 av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
362 bsize);
363 }
364 }
365 av1_count_overlappable_neighbors(cm, xd);
366 mbmi->num_proj_ref = 1;
367 }
368
369 static void estimate_single_ref_frame_costs(const AV1_COMMON *cm,
370 const MACROBLOCKD *xd,
371 const ModeCosts *mode_costs,
372 int segment_id,
373 unsigned int *ref_costs_single) {
374 int seg_ref_active =
375 segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
376 if (seg_ref_active) {
377 memset(ref_costs_single, 0, REF_FRAMES * sizeof(*ref_costs_single));
378 } else {
379 int intra_inter_ctx = av1_get_intra_inter_context(xd);
380 ref_costs_single[INTRA_FRAME] =
381 mode_costs->intra_inter_cost[intra_inter_ctx][0];
382 unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
383 ref_costs_single[LAST_FRAME] = base_cost;
384 ref_costs_single[GOLDEN_FRAME] = base_cost;
385 ref_costs_single[ALTREF_FRAME] = base_cost;
386 // add cost for last, golden, altref
387 ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[0][0][0];
388 ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][0][1];
389 ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[0][1][0];
390 ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][0][1];
391 ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[0][2][0];
392 }
393 }
394
395 static void estimate_comp_ref_frame_costs(
396 const AV1_COMMON *cm, const MACROBLOCKD *xd, const ModeCosts *mode_costs,
397 int segment_id, unsigned int (*ref_costs_comp)[REF_FRAMES]) {
398 if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
399 for (int ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
400 memset(ref_costs_comp[ref_frame], 0,
401 REF_FRAMES * sizeof((*ref_costs_comp)[0]));
402 } else {
403 int intra_inter_ctx = av1_get_intra_inter_context(xd);
404 unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
405
406 if (cm->current_frame.reference_mode != SINGLE_REFERENCE) {
407 // Similar to single ref, determine cost of compound ref frames.
408 // cost_compound_refs = cost_first_ref + cost_second_ref
409 const int bwdref_comp_ctx_p = av1_get_pred_context_comp_bwdref_p(xd);
410 const int bwdref_comp_ctx_p1 = av1_get_pred_context_comp_bwdref_p1(xd);
411 const int ref_comp_ctx_p = av1_get_pred_context_comp_ref_p(xd);
412 const int ref_comp_ctx_p1 = av1_get_pred_context_comp_ref_p1(xd);
413 const int ref_comp_ctx_p2 = av1_get_pred_context_comp_ref_p2(xd);
414
415 const int comp_ref_type_ctx = av1_get_comp_reference_type_context(xd);
416 unsigned int ref_bicomp_costs[REF_FRAMES] = { 0 };
417
418 ref_bicomp_costs[LAST_FRAME] = ref_bicomp_costs[LAST2_FRAME] =
419 ref_bicomp_costs[LAST3_FRAME] = ref_bicomp_costs[GOLDEN_FRAME] =
420 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][1];
421 ref_bicomp_costs[BWDREF_FRAME] = ref_bicomp_costs[ALTREF2_FRAME] = 0;
422 ref_bicomp_costs[ALTREF_FRAME] = 0;
423
424 // cost of first ref frame
425 ref_bicomp_costs[LAST_FRAME] +=
426 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
427 ref_bicomp_costs[LAST2_FRAME] +=
428 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
429 ref_bicomp_costs[LAST3_FRAME] +=
430 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
431 ref_bicomp_costs[GOLDEN_FRAME] +=
432 mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
433
434 ref_bicomp_costs[LAST_FRAME] +=
435 mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][0];
436 ref_bicomp_costs[LAST2_FRAME] +=
437 mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][1];
438
439 ref_bicomp_costs[LAST3_FRAME] +=
440 mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][0];
441 ref_bicomp_costs[GOLDEN_FRAME] +=
442 mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][1];
443
444 // cost of second ref frame
445 ref_bicomp_costs[BWDREF_FRAME] +=
446 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
447 ref_bicomp_costs[ALTREF2_FRAME] +=
448 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
449 ref_bicomp_costs[ALTREF_FRAME] +=
450 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][1];
451
452 ref_bicomp_costs[BWDREF_FRAME] +=
453 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][0];
454 ref_bicomp_costs[ALTREF2_FRAME] +=
455 mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][1];
456
457 // cost: if one ref frame is forward ref, the other ref is backward ref
458 for (int ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
459 for (int ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1) {
460 ref_costs_comp[ref0][ref1] =
461 ref_bicomp_costs[ref0] + ref_bicomp_costs[ref1];
462 }
463 }
464
465 // cost: if both ref frames are the same side.
466 const int uni_comp_ref_ctx_p = av1_get_pred_context_uni_comp_ref_p(xd);
467 const int uni_comp_ref_ctx_p1 = av1_get_pred_context_uni_comp_ref_p1(xd);
468 const int uni_comp_ref_ctx_p2 = av1_get_pred_context_uni_comp_ref_p2(xd);
469 ref_costs_comp[LAST_FRAME][LAST2_FRAME] =
470 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
471 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
472 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][0];
473 ref_costs_comp[LAST_FRAME][LAST3_FRAME] =
474 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
475 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
476 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
477 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][0];
478 ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] =
479 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
480 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
481 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
482 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][1];
483 ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] =
484 base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
485 mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][1];
486 } else {
487 for (int ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
488 for (int ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1)
489 ref_costs_comp[ref0][ref1] = 512;
490 }
491 ref_costs_comp[LAST_FRAME][LAST2_FRAME] = 512;
492 ref_costs_comp[LAST_FRAME][LAST3_FRAME] = 512;
493 ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] = 512;
494 ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] = 512;
495 }
496 }
497 }
498
499 static TX_SIZE calculate_tx_size(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
500 MACROBLOCK *const x, unsigned int var,
501 unsigned int sse) {
502 MACROBLOCKD *const xd = &x->e_mbd;
503 TX_SIZE tx_size;
504 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
505 if (txfm_params->tx_mode_search_type == TX_MODE_SELECT) {
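    // A large DC component relative to the variance (sse > 4 * var) favors the
    // largest allowed transform; otherwise default to 8x8.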
506 if (sse > (var << 2))
507 tx_size =
508 AOMMIN(max_txsize_lookup[bsize],
509 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
510 else
511 tx_size = TX_8X8;
512
513 if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ &&
514 cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
515 tx_size = TX_8X8;
516 else if (tx_size > TX_16X16)
517 tx_size = TX_16X16;
518 } else {
519 tx_size =
520 AOMMIN(max_txsize_lookup[bsize],
521 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
522 }
523
524 if (txfm_params->tx_mode_search_type != ONLY_4X4 && bsize > BLOCK_32X32)
525 tx_size = TX_16X16;
526
527 return AOMMIN(tx_size, TX_16X16);
528 }
529
530 static const uint8_t b_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 1, 1, 1, 2,
531 2, 2, 3, 3, 3, 4,
532 4, 4, 5, 5 };
533 static const uint8_t b_height_log2_lookup[BLOCK_SIZES] = { 0, 1, 0, 1, 2, 1,
534 2, 3, 2, 3, 4, 3,
535 4, 5, 4, 5 };
536
537 static void block_variance(const uint8_t *src, int src_stride,
538 const uint8_t *ref, int ref_stride, int w, int h,
539 unsigned int *sse, int *sum, int block_size,
540 uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
541 int i, j, k = 0;
542
543 *sse = 0;
544 *sum = 0;
545
546 for (i = 0; i < h; i += block_size) {
547 for (j = 0; j < w; j += block_size) {
548 aom_get8x8var(src + src_stride * i + j, src_stride,
549 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
550 &sum8x8[k]);
551 *sse += sse8x8[k];
552 *sum += sum8x8[k];
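      // 8x8 variance: SSE minus the squared mean (64 pixels, hence >> 6).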
553 var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
554 k++;
555 }
556 }
557 }
558
559 static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
560 unsigned int *sse_i, int *sum_i,
561 unsigned int *var_o, unsigned int *sse_o,
562 int *sum_o) {
563 const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
564 const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
565 const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
566 int i, j, k = 0;
567
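  // Merge each 2x2 group of unit-size sub-blocks into one output block; the
  // variance shift is log2 of the merged block's pixel count.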
568 for (i = 0; i < nh; i += 2) {
569 for (j = 0; j < nw; j += 2) {
570 sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
571 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
572 sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
573 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
574 var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
575 (b_width_log2_lookup[unit_size] +
576 b_height_log2_lookup[unit_size] + 6));
577 k++;
578 }
579 }
580 }
581
582 // Adjust the ac_thr according to speed, width, height and normalized sum
583 static int ac_thr_factor(const int speed, const int width, const int height,
584 const int norm_sum) {
585 if (speed >= 8 && norm_sum < 5) {
586 if (width <= 640 && height <= 480)
587 return 4;
588 else
589 return 2;
590 }
591 return 1;
592 }
593
594 static void model_skip_for_sb_y_large(AV1_COMP *cpi, BLOCK_SIZE bsize,
595 int mi_row, int mi_col, MACROBLOCK *x,
596 MACROBLOCKD *xd, RD_STATS *rd_stats,
597 int *early_term, int calculate_rd) {
598 // Note our transform coeffs are 8 times an orthogonal transform.
599 // Hence quantizer step is also 8 times. To get effective quantizer
600 // we need to divide by 8 before sending to modeling function.
601 unsigned int sse;
602 struct macroblock_plane *const p = &x->plane[0];
603 struct macroblockd_plane *const pd = &xd->plane[0];
604 const uint32_t dc_quant = p->dequant_QTX[0];
605 const uint32_t ac_quant = p->dequant_QTX[1];
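  // Thresholds for the skip test: (effective quant step)^2 = (quant / 8)^2,
  // i.e. quant^2 >> 6. ac_thr is further scaled by ac_thr_factor() below.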
606 const int64_t dc_thr = dc_quant * dc_quant >> 6;
607 int64_t ac_thr = ac_quant * ac_quant >> 6;
608 unsigned int var;
609 int sum;
610
611 const int bw = b_width_log2_lookup[bsize];
612 const int bh = b_height_log2_lookup[bsize];
613 const int num8x8 = 1 << (bw + bh - 2);
614 unsigned int sse8x8[256] = { 0 };
615 int sum8x8[256] = { 0 };
616 unsigned int var8x8[256] = { 0 };
617 TX_SIZE tx_size;
618 int k;
619 // Calculate variance for whole partition, and also save 8x8 blocks' variance
620 // to be used in following transform skipping test.
621 block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
622 4 << bw, 4 << bh, &sse, &sum, 8, sse8x8, sum8x8, var8x8);
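  // Whole-block variance: SSE minus the squared mean over 2^(bw + bh + 4) pixels.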
623 var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));
624
625 rd_stats->sse = sse;
626
627 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
628 cpi->common.height, abs(sum) >> (bw + bh));
629
630 tx_size = calculate_tx_size(cpi, bsize, x, var, sse);
631 // The code below for setting skip flag assumes a transform size of at least
632 // 8x8, so force this lower limit on the transform.
633 if (tx_size < TX_8X8) tx_size = TX_8X8;
634 xd->mi[0]->tx_size = tx_size;
635
636 // Evaluate if the partition block is a skippable block in Y plane.
637 {
638 unsigned int sse16x16[64] = { 0 };
639 int sum16x16[64] = { 0 };
640 unsigned int var16x16[64] = { 0 };
641 const int num16x16 = num8x8 >> 2;
642
643 unsigned int sse32x32[16] = { 0 };
644 int sum32x32[16] = { 0 };
645 unsigned int var32x32[16] = { 0 };
646 const int num32x32 = num8x8 >> 4;
647
648 int ac_test = 1;
649 int dc_test = 1;
650 const int num = (tx_size == TX_8X8)
651 ? num8x8
652 : ((tx_size == TX_16X16) ? num16x16 : num32x32);
653 const unsigned int *sse_tx =
654 (tx_size == TX_8X8) ? sse8x8
655 : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
656 const unsigned int *var_tx =
657 (tx_size == TX_8X8) ? var8x8
658 : ((tx_size == TX_16X16) ? var16x16 : var32x32);
659
660 // Calculate variance if tx_size > TX_8X8
661 if (tx_size >= TX_16X16)
662 calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
663 sum16x16);
664 if (tx_size == TX_32X32)
665 calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
666 sse32x32, sum32x32);
667
668 // Skipping test
669 *early_term = 0;
670 for (k = 0; k < num; k++)
671 // Check if all ac coefficients can be quantized to zero.
672 if (!(var_tx[k] < ac_thr || var == 0)) {
673 ac_test = 0;
674 break;
675 }
676
677 for (k = 0; k < num; k++)
678 // Check if dc coefficient can be quantized to zero.
679 if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
680 dc_test = 0;
681 break;
682 }
683
684 if (ac_test && dc_test) {
685 int skip_uv[2] = { 0 };
686 unsigned int var_uv[2];
687 unsigned int sse_uv[2];
688 AV1_COMMON *const cm = &cpi->common;
689 // Transform skipping test in UV planes.
690 for (int i = 1; i <= 2; i++) {
691 int j = i - 1;
692 skip_uv[j] = 1;
693 if (x->color_sensitivity[j]) {
694 skip_uv[j] = 0;
695 struct macroblock_plane *const puv = &x->plane[i];
696 struct macroblockd_plane *const puvd = &xd->plane[i];
697 const BLOCK_SIZE uv_bsize = get_plane_block_size(
698 bsize, puvd->subsampling_x, puvd->subsampling_y);
699 // Adjust these thresholds for UV.
700 const int64_t uv_dc_thr =
701 (puv->dequant_QTX[0] * puv->dequant_QTX[0]) >> 3;
702 const int64_t uv_ac_thr =
703 (puv->dequant_QTX[1] * puv->dequant_QTX[1]) >> 3;
704 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, i,
705 i);
706 var_uv[j] = cpi->fn_ptr[uv_bsize].vf(puv->src.buf, puv->src.stride,
707 puvd->dst.buf, puvd->dst.stride,
708 &sse_uv[j]);
709 if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
710 (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
711 skip_uv[j] = 1;
712 else
713 break;
714 }
715 }
716 if (skip_uv[0] & skip_uv[1]) {
717 *early_term = 1;
718 }
719 }
720 }
721 if (calculate_rd) {
722 if (!*early_term) {
723 const int bwide = block_size_wide[bsize];
724 const int bhigh = block_size_high[bsize];
725
726 model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, sse, bwide * bhigh,
727 &rd_stats->rate, &rd_stats->dist);
728 }
729
730 if (*early_term) {
731 rd_stats->rate = 0;
732 rd_stats->dist = sse << 4;
733 }
734 }
735 }
736
737 static void model_rd_for_sb_y(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
738 MACROBLOCK *x, MACROBLOCKD *xd,
739 RD_STATS *rd_stats, int calculate_rd) {
740 // Note our transform coeffs are 8 times an orthogonal transform.
741 // Hence quantizer step is also 8 times. To get effective quantizer
742 // we need to divide by 8 before sending to modeling function.
743 const int ref = xd->mi[0]->ref_frame[0];
744
745 assert(bsize < BLOCK_SIZES_ALL);
746
747 struct macroblock_plane *const p = &x->plane[0];
748 struct macroblockd_plane *const pd = &xd->plane[0];
749 unsigned int sse;
750 int rate;
751 int64_t dist;
752
753 unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
754 pd->dst.buf, pd->dst.stride, &sse);
755 xd->mi[0]->tx_size = calculate_tx_size(cpi, bsize, x, var, sse);
756
757 if (calculate_rd) {
758 const int bwide = block_size_wide[bsize];
759 const int bhigh = block_size_high[bsize];
760 model_rd_with_curvfit(cpi, x, bsize, AOM_PLANE_Y, sse, bwide * bhigh, &rate,
761 &dist);
762 } else {
763 rate = INT_MAX; // this will be overwritten later with block_yrd
764 dist = INT_MAX;
765 }
766 rd_stats->sse = sse;
767 x->pred_sse[ref] = (unsigned int)AOMMIN(sse, UINT_MAX);
768
769 assert(rate >= 0);
770
771 rd_stats->skip_txfm = (rate == 0);
772 rate = AOMMIN(rate, INT_MAX);
773 rd_stats->rate = rate;
774 rd_stats->dist = dist;
775 }
776
777 /*!\brief Calculates RD Cost using Hadamard transform.
778 *
779 * \ingroup nonrd_mode_search
780 * \callgraph
781 * \callergraph
782 * Calculates RD Cost using Hadamard transform. For low bit depth this function
783 * uses a low-precision (16-bit) set of functions and 32-bit for high bit depth
784 * \param[in] cpi Top-level encoder structure
785 * \param[in] x Pointer to structure holding all the data for
786 the current macroblock
787 * \param[in] mi_row Row index in 4x4 units
788 * \param[in] mi_col Column index in 4x4 units
789 * \param[in] this_rdc Pointer to calculated RD Cost
790 * \param[in] skippable Pointer to a flag indicating possible tx skip
791 * \param[in] bsize Current block size
792 * \param[in] tx_size Transform size
793 *
794 * \return Nothing is returned. Instead, calculated RD cost is placed to
795 * \c this_rdc. \c skippable flag is set if there is no non-zero quantized
796 * coefficients for Hadamard transform
797 */
798 static void block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col,
799 RD_STATS *this_rdc, int *skippable, BLOCK_SIZE bsize,
800 TX_SIZE tx_size) {
801 MACROBLOCKD *xd = &x->e_mbd;
802 const struct macroblockd_plane *pd = &xd->plane[0];
803 struct macroblock_plane *const p = &x->plane[0];
804 const int num_4x4_w = mi_size_wide[bsize];
805 const int num_4x4_h = mi_size_high[bsize];
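  // step: number of 4x4 units covered by one transform block;
  // block_step: transform width/height in 4x4 units.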
806 const int step = 1 << (tx_size << 1);
807 const int block_step = (1 << tx_size);
808 int block = 0;
809 const int max_blocks_wide =
810 num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
811 const int max_blocks_high =
812 num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
813 int eob_cost = 0;
814 const int bw = 4 * num_4x4_w;
815 const int bh = 4 * num_4x4_h;
816
817 (void)mi_row;
818 (void)mi_col;
819 (void)cpi;
820
821 #if CONFIG_AV1_HIGHBITDEPTH
822 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
823 aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
824 p->src.stride, pd->dst.buf, pd->dst.stride,
825 x->e_mbd.bd);
826 } else {
827 aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
828 pd->dst.buf, pd->dst.stride);
829 }
830 #else
831 aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
832 pd->dst.buf, pd->dst.stride);
833 #endif
834
835 *skippable = 1;
836 // Keep track of the row and column of the blocks we use so that we know
837 // if we are in the unrestricted motion border.
838 for (int r = 0; r < max_blocks_high; r += block_step) {
839 for (int c = 0; c < num_4x4_w; c += block_step) {
840 if (c < max_blocks_wide) {
841 const SCAN_ORDER *const scan_order = &av1_default_scan_orders[tx_size];
842 const int block_offset = BLOCK_OFFSET(block);
843 #if CONFIG_AV1_HIGHBITDEPTH
844 tran_low_t *const coeff = p->coeff + block_offset;
845 tran_low_t *const qcoeff = p->qcoeff + block_offset;
846 tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
847 #else
848 int16_t *const low_coeff = (int16_t *)p->coeff + block_offset;
849 int16_t *const low_qcoeff = (int16_t *)p->qcoeff + block_offset;
850 int16_t *const low_dqcoeff = (int16_t *)p->dqcoeff + block_offset;
851 #endif
852 uint16_t *const eob = &p->eobs[block];
853 const int diff_stride = bw;
854 const int16_t *src_diff;
855 src_diff = &p->src_diff[(r * diff_stride + c) << 2];
856
857 switch (tx_size) {
858 case TX_64X64:
859 assert(0); // Not implemented
860 break;
861 case TX_32X32:
862 assert(0); // Not used
863 break;
864 #if CONFIG_AV1_HIGHBITDEPTH
865 case TX_16X16:
866 aom_hadamard_16x16(src_diff, diff_stride, coeff);
867 av1_quantize_fp(coeff, 16 * 16, p->zbin_QTX, p->round_fp_QTX,
868 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
869 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
870 scan_order->iscan);
871 break;
872 case TX_8X8:
873 aom_hadamard_8x8(src_diff, diff_stride, coeff);
874 av1_quantize_fp(coeff, 8 * 8, p->zbin_QTX, p->round_fp_QTX,
875 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
876 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
877 scan_order->iscan);
878 break;
879 default:
880 assert(tx_size == TX_4X4);
881 aom_fdct4x4(src_diff, coeff, diff_stride);
882 av1_quantize_fp(coeff, 4 * 4, p->zbin_QTX, p->round_fp_QTX,
883 p->quant_fp_QTX, p->quant_shift_QTX, qcoeff,
884 dqcoeff, p->dequant_QTX, eob, scan_order->scan,
885 scan_order->iscan);
886 break;
887 #else
888 case TX_16X16:
889 aom_hadamard_lp_16x16(src_diff, diff_stride, low_coeff);
890 av1_quantize_lp(low_coeff, 16 * 16, p->round_fp_QTX,
891 p->quant_fp_QTX, low_qcoeff, low_dqcoeff,
892 p->dequant_QTX, eob, scan_order->scan);
893 break;
894 case TX_8X8:
895 aom_hadamard_lp_8x8(src_diff, diff_stride, low_coeff);
896 av1_quantize_lp(low_coeff, 8 * 8, p->round_fp_QTX, p->quant_fp_QTX,
897 low_qcoeff, low_dqcoeff, p->dequant_QTX, eob,
898 scan_order->scan);
899 break;
900 default:
901 assert(tx_size == TX_4X4);
902 aom_fdct4x4_lp(src_diff, low_coeff, diff_stride);
903 av1_quantize_lp(low_coeff, 4 * 4, p->round_fp_QTX, p->quant_fp_QTX,
904 low_qcoeff, low_dqcoeff, p->dequant_QTX, eob,
905 scan_order->scan);
906 break;
907 #endif
908 }
909 assert(*eob <= 1024);
910 *skippable &= (*eob == 0);
911 eob_cost += 1;
912 }
913 block += step;
914 }
915 }
916 this_rdc->skip_txfm = *skippable;
917 this_rdc->rate = 0;
918 if (this_rdc->sse < INT64_MAX) {
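    // Scale the pixel-domain SSE into the transform domain used for the
    // distortion below (coeffs are 8x an orthogonal transform, hence << 6;
    // the >> 2 matches the shift applied to the block-error results).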
919 this_rdc->sse = (this_rdc->sse << 6) >> 2;
920 if (*skippable) {
921 this_rdc->dist = this_rdc->sse;
922 return;
923 }
924 }
925
926 block = 0;
927 this_rdc->dist = 0;
928 for (int r = 0; r < max_blocks_high; r += block_step) {
929 for (int c = 0; c < num_4x4_w; c += block_step) {
930 if (c < max_blocks_wide) {
931 const int block_offset = BLOCK_OFFSET(block);
932 uint16_t *const eob = &p->eobs[block];
933 #if CONFIG_AV1_HIGHBITDEPTH
934 int64_t dummy;
935 tran_low_t *const coeff = p->coeff + block_offset;
936 tran_low_t *const qcoeff = p->qcoeff + block_offset;
937 tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
938
939 if (*eob == 1)
940 this_rdc->rate += (int)abs(qcoeff[0]);
941 else if (*eob > 1)
942 this_rdc->rate += aom_satd(qcoeff, step << 4);
943
944 this_rdc->dist +=
945 av1_block_error(coeff, dqcoeff, step << 4, &dummy) >> 2;
946 #else
947 int16_t *const low_coeff = (int16_t *)p->coeff + block_offset;
948 int16_t *const low_qcoeff = (int16_t *)p->qcoeff + block_offset;
949 int16_t *const low_dqcoeff = (int16_t *)p->dqcoeff + block_offset;
950
951 if (*eob == 1)
952 this_rdc->rate += (int)abs(low_qcoeff[0]);
953 else if (*eob > 1)
954 this_rdc->rate += aom_satd_lp(low_qcoeff, step << 4);
955
956 this_rdc->dist +=
957 av1_block_error_lp(low_coeff, low_dqcoeff, step << 4) >> 2;
958 #endif
959 }
960 block += step;
961 }
962 }
963
964 // If skippable is set, rate gets clobbered later.
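  // Convert the SATD-based estimate and per-block EOB cost into the
  // AV1_PROB_COST_SHIFT domain used by RDCOST().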
965 this_rdc->rate <<= (2 + AV1_PROB_COST_SHIFT);
966 this_rdc->rate += (eob_cost << AV1_PROB_COST_SHIFT);
967 }
968
969 static INLINE void init_mbmi(MB_MODE_INFO *mbmi, PREDICTION_MODE pred_mode,
970 MV_REFERENCE_FRAME ref_frame0,
971 MV_REFERENCE_FRAME ref_frame1,
972 const AV1_COMMON *cm) {
973 PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
974 mbmi->ref_mv_idx = 0;
975 mbmi->mode = pred_mode;
976 mbmi->uv_mode = UV_DC_PRED;
977 mbmi->ref_frame[0] = ref_frame0;
978 mbmi->ref_frame[1] = ref_frame1;
979 pmi->palette_size[0] = 0;
980 pmi->palette_size[1] = 0;
981 mbmi->filter_intra_mode_info.use_filter_intra = 0;
982 mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
983 mbmi->motion_mode = SIMPLE_TRANSLATION;
984 mbmi->num_proj_ref = 1;
985 mbmi->interintra_mode = 0;
986 set_default_interp_filters(mbmi, cm->features.interp_filter);
987 }
988
989 #if CONFIG_INTERNAL_STATS
990 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
991 int mode_index) {
992 #else
993 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
994 #endif // CONFIG_INTERNAL_STATS
995 MACROBLOCKD *const xd = &x->e_mbd;
996 TxfmSearchInfo *txfm_info = &x->txfm_search_info;
997
998 // Take a snapshot of the coding context so it can be
999 // restored if we decide to encode this way
1000 ctx->rd_stats.skip_txfm = txfm_info->skip_txfm;
1001
1002 memset(ctx->blk_skip, 0, sizeof(ctx->blk_skip[0]) * ctx->num_4x4_blk);
1003 memset(ctx->tx_type_map, DCT_DCT,
1004 sizeof(ctx->tx_type_map[0]) * ctx->num_4x4_blk);
1005 ctx->skippable = txfm_info->skip_txfm;
1006 #if CONFIG_INTERNAL_STATS
1007 ctx->best_mode_index = mode_index;
1008 #endif // CONFIG_INTERNAL_STATS
1009 ctx->mic = *xd->mi[0];
1010 ctx->skippable = txfm_info->skip_txfm;
1011 av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, &x->mbmi_ext,
1012 av1_ref_frame_type(xd->mi[0]->ref_frame));
1013 ctx->comp_pred_diff = 0;
1014 ctx->hybrid_pred_diff = 0;
1015 ctx->single_pred_diff = 0;
1016 }
1017
1018 static int get_pred_buffer(PRED_BUFFER *p, int len) {
1019 for (int i = 0; i < len; i++) {
1020 if (!p[i].in_use) {
1021 p[i].in_use = 1;
1022 return i;
1023 }
1024 }
1025 return -1;
1026 }
1027
1028 static void free_pred_buffer(PRED_BUFFER *p) {
1029 if (p != NULL) p->in_use = 0;
1030 }
1031
1032 static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode,
1033 int16_t mode_context) {
1034 if (is_inter_compound_mode(mode)) {
1035 return mode_costs
1036 ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
1037 }
1038
1039 int mode_cost = 0;
1040 int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
1041
1042 assert(is_inter_mode(mode));
1043
1044 if (mode == NEWMV) {
1045 mode_cost = mode_costs->newmv_mode_cost[mode_ctx][0];
1046 return mode_cost;
1047 } else {
1048 mode_cost = mode_costs->newmv_mode_cost[mode_ctx][1];
1049 mode_ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
1050
1051 if (mode == GLOBALMV) {
1052 mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][0];
1053 return mode_cost;
1054 } else {
1055 mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][1];
1056 mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
1057 mode_cost += mode_costs->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
1058 return mode_cost;
1059 }
1060 }
1061 }
1062
1063 static void newmv_diff_bias(MACROBLOCKD *xd, PREDICTION_MODE this_mode,
1064 RD_STATS *this_rdc, BLOCK_SIZE bsize, int mv_row,
1065 int mv_col, int speed, uint32_t spatial_variance,
1066 int content_state_sb) {
1067 // Bias against MVs associated with NEWMV mode that are very different from
1068 // top/left neighbors.
1069 if (this_mode == NEWMV) {
1070 int al_mv_average_row;
1071 int al_mv_average_col;
1072 int left_row, left_col;
1073 int row_diff, col_diff;
1074 int above_mv_valid = 0;
1075 int left_mv_valid = 0;
1076 int above_row = 0;
1077 int above_col = 0;
1078 if (bsize >= BLOCK_64X64 && content_state_sb != kHighSad &&
1079 spatial_variance < 300 &&
1080 (mv_row > 16 || mv_row < -16 || mv_col > 16 || mv_col < -16)) {
1081 this_rdc->rdcost = this_rdc->rdcost << 2;
1082 return;
1083 }
1084 if (xd->above_mbmi) {
1085 above_mv_valid = xd->above_mbmi->mv[0].as_int != INVALID_MV;
1086 above_row = xd->above_mbmi->mv[0].as_mv.row;
1087 above_col = xd->above_mbmi->mv[0].as_mv.col;
1088 }
1089 if (xd->left_mbmi) {
1090 left_mv_valid = xd->left_mbmi->mv[0].as_int != INVALID_MV;
1091 left_row = xd->left_mbmi->mv[0].as_mv.row;
1092 left_col = xd->left_mbmi->mv[0].as_mv.col;
1093 }
1094 if (above_mv_valid && left_mv_valid) {
1095 al_mv_average_row = (above_row + left_row + 1) >> 1;
1096 al_mv_average_col = (above_col + left_col + 1) >> 1;
1097 } else if (above_mv_valid) {
1098 al_mv_average_row = above_row;
1099 al_mv_average_col = above_col;
1100 } else if (left_mv_valid) {
1101 al_mv_average_row = left_row;
1102 al_mv_average_col = left_col;
1103 } else {
1104 al_mv_average_row = al_mv_average_col = 0;
1105 }
1106 row_diff = al_mv_average_row - mv_row;
1107 col_diff = al_mv_average_col - mv_col;
1108 if (row_diff > 80 || row_diff < -80 || col_diff > 80 || col_diff < -80) {
1109 if (bsize >= BLOCK_32X32)
1110 this_rdc->rdcost = this_rdc->rdcost << 1;
1111 else
1112 this_rdc->rdcost = 5 * this_rdc->rdcost >> 2;
1113 }
1114 } else {
1115 // Bias for speed >= 8 for low spatial variance.
1116 if (speed >= 8 && spatial_variance < 150 &&
1117 (mv_row > 64 || mv_row < -64 || mv_col > 64 || mv_col < -64))
1118 this_rdc->rdcost = 5 * this_rdc->rdcost >> 2;
1119 }
1120 }
1121
1122 static void model_rd_for_sb_uv(AV1_COMP *cpi, BLOCK_SIZE plane_bsize,
1123 MACROBLOCK *x, MACROBLOCKD *xd,
1124 RD_STATS *this_rdc, int64_t *sse_y,
1125 int start_plane, int stop_plane) {
1126 // Note our transform coeffs are 8 times an orthogonal transform.
1127 // Hence quantizer step is also 8 times. To get effective quantizer
1128 // we need to divide by 8 before sending to modeling function.
1129 unsigned int sse;
1130 int rate;
1131 int64_t dist;
1132 int i;
1133 int64_t tot_sse = *sse_y;
1134
1135 this_rdc->rate = 0;
1136 this_rdc->dist = 0;
1137 this_rdc->skip_txfm = 0;
1138
1139 for (i = start_plane; i <= stop_plane; ++i) {
1140 struct macroblock_plane *const p = &x->plane[i];
1141 struct macroblockd_plane *const pd = &xd->plane[i];
1142 const uint32_t dc_quant = p->dequant_QTX[0];
1143 const uint32_t ac_quant = p->dequant_QTX[1];
1144 const BLOCK_SIZE bs = plane_bsize;
1145 unsigned int var;
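    // Only evaluate chroma planes flagged as color-sensitive.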
1146 if (!x->color_sensitivity[i - 1]) continue;
1147
1148 var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
1149 pd->dst.stride, &sse);
1150 assert(sse >= var);
1151 tot_sse += sse;
1152
1153 av1_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
1154 dc_quant >> 3, &rate, &dist);
1155
1156 this_rdc->rate += rate >> 1;
1157 this_rdc->dist += dist << 3;
1158
1159 av1_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
1160 &rate, &dist);
1161
1162 this_rdc->rate += rate;
1163 this_rdc->dist += dist << 4;
1164 }
1165
1166 if (this_rdc->rate == 0) {
1167 this_rdc->skip_txfm = 1;
1168 }
1169
1170 if (RDCOST(x->rdmult, this_rdc->rate, this_rdc->dist) >=
1171 RDCOST(x->rdmult, 0, tot_sse << 4)) {
1172 this_rdc->rate = 0;
1173 this_rdc->dist = tot_sse << 4;
1174 this_rdc->skip_txfm = 1;
1175 }
1176
1177 *sse_y = tot_sse;
1178 }
1179
1180 /*!\cond */
1181 struct estimate_block_intra_args {
1182 AV1_COMP *cpi;
1183 MACROBLOCK *x;
1184 PREDICTION_MODE mode;
1185 int skippable;
1186 RD_STATS *rdc;
1187 };
1188 /*!\endcond */
1189
1190 /*!\brief Estimation of RD cost of an intra mode for Non-RD optimized case.
1191 *
1192 * \ingroup nonrd_mode_search
1193 * \callgraph
1194 * \callergraph
1195 * Calculates RD Cost for an intra mode for a single TX block using Hadamard
1196 * transform.
1197 * \param[in] plane Color plane
1198 * \param[in] block Index of a TX block in a prediction block
1199 * \param[in] row Row of a current TX block
1200 * \param[in] col Column of a current TX block
1201 * \param[in] plane_bsize Block size of a current prediction block
1202 * \param[in] tx_size Transform size
1203 * \param[in] arg Pointer to a structure that holds parameters
1204 * for intra mode search
1205 *
1206 * \return Nothing is returned. Instead, the RD Cost of the current TX block
1207 * is accumulated into \c args->rdc and \c args->skippable is updated
1208 */
1209 static void estimate_block_intra(int plane, int block, int row, int col,
1210 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
1211 void *arg) {
1212 struct estimate_block_intra_args *const args = arg;
1213 AV1_COMP *const cpi = args->cpi;
1214 AV1_COMMON *const cm = &cpi->common;
1215 MACROBLOCK *const x = args->x;
1216 MACROBLOCKD *const xd = &x->e_mbd;
1217 struct macroblock_plane *const p = &x->plane[plane];
1218 struct macroblockd_plane *const pd = &xd->plane[plane];
1219 const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
1220 uint8_t *const src_buf_base = p->src.buf;
1221 uint8_t *const dst_buf_base = pd->dst.buf;
1222 const int64_t src_stride = p->src.stride;
1223 const int64_t dst_stride = pd->dst.stride;
1224 RD_STATS this_rdc;
1225
1226 (void)block;
1227
1228 p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
1229 pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
1230
1231 av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size);
1232 av1_invalid_rd_stats(&this_rdc);
1233
1234 if (plane == 0) {
1235 block_yrd(cpi, x, 0, 0, &this_rdc, &args->skippable, bsize_tx,
1236 AOMMIN(tx_size, TX_16X16));
1237 } else {
1238 int64_t sse = 0;
1239 model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &sse, plane, plane);
1240 }
1241
1242 p->src.buf = src_buf_base;
1243 pd->dst.buf = dst_buf_base;
1244 args->rdc->rate += this_rdc.rate;
1245 args->rdc->dist += this_rdc.dist;
1246 }
1247
1248 static INLINE void update_thresh_freq_fact(AV1_COMP *cpi, MACROBLOCK *x,
1249 BLOCK_SIZE bsize,
1250 MV_REFERENCE_FRAME ref_frame,
1251 THR_MODES best_mode_idx,
1252 PREDICTION_MODE mode) {
1253 THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1254 int *freq_fact = &x->thresh_freq_fact[bsize][thr_mode_idx];
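  // Decay the factor by 1/16 for the winning mode; otherwise raise it, capped
  // by adaptive_rd_thresh * RD_THRESH_MAX_FACT.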
1255 if (thr_mode_idx == best_mode_idx) {
1256 *freq_fact -= (*freq_fact >> 4);
1257 } else {
1258 *freq_fact =
1259 AOMMIN(*freq_fact + RD_THRESH_INC,
1260 cpi->sf.inter_sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1261 }
1262 }
1263
1264 static INLINE int get_force_skip_low_temp_var_small_sb(uint8_t *variance_low,
1265 int mi_row, int mi_col,
1266 BLOCK_SIZE bsize) {
1267 // Relative indices of MB inside the superblock.
1268 const int mi_x = mi_row & 0xF;
1269 const int mi_y = mi_col & 0xF;
1270 // Relative indices of 16x16 block inside the superblock.
1271 const int i = mi_x >> 2;
1272 const int j = mi_y >> 2;
1273 int force_skip_low_temp_var = 0;
1274 // Set force_skip_low_temp_var based on the block size and block offset.
1275 switch (bsize) {
1276 case BLOCK_64X64: force_skip_low_temp_var = variance_low[0]; break;
1277 case BLOCK_64X32:
1278 if (!mi_y && !mi_x) {
1279 force_skip_low_temp_var = variance_low[1];
1280 } else if (!mi_y && mi_x) {
1281 force_skip_low_temp_var = variance_low[2];
1282 }
1283 break;
1284 case BLOCK_32X64:
1285 if (!mi_y && !mi_x) {
1286 force_skip_low_temp_var = variance_low[3];
1287 } else if (mi_y && !mi_x) {
1288 force_skip_low_temp_var = variance_low[4];
1289 }
1290 break;
1291 case BLOCK_32X32:
1292 if (!mi_y && !mi_x) {
1293 force_skip_low_temp_var = variance_low[5];
1294 } else if (mi_y && !mi_x) {
1295 force_skip_low_temp_var = variance_low[6];
1296 } else if (!mi_y && mi_x) {
1297 force_skip_low_temp_var = variance_low[7];
1298 } else if (mi_y && mi_x) {
1299 force_skip_low_temp_var = variance_low[8];
1300 }
1301 break;
1302 case BLOCK_32X16:
1303 case BLOCK_16X32:
1304 case BLOCK_16X16:
1305 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
1306 break;
1307 default: break;
1308 }
1309
1310 return force_skip_low_temp_var;
1311 }
1312
1313 static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
1314 int mi_col, BLOCK_SIZE bsize) {
1315 int force_skip_low_temp_var = 0;
1316 int x, y;
1317 x = (mi_col & 0x1F) >> 4;
1318 // y = (mi_row & 0x1F) >> 4;
1319 // const int idx64 = (y << 1) + x;
1320 y = (mi_row & 0x17) >> 3;
1321 const int idx64 = y + x;
1322
1323 x = (mi_col & 0xF) >> 3;
1324 // y = (mi_row & 0xF) >> 3;
1325 // const int idx32 = (y << 1) + x;
1326 y = (mi_row & 0xB) >> 2;
1327 const int idx32 = y + x;
1328
1329 x = (mi_col & 0x7) >> 2;
1330 // y = (mi_row & 0x7) >> 2;
1331 // const int idx16 = (y << 1) + x;
1332 y = (mi_row & 0x5) >> 1;
1333 const int idx16 = y + x;
1334 // Set force_skip_low_temp_var based on the block size and block offset.
1335 switch (bsize) {
1336 case BLOCK_128X128: force_skip_low_temp_var = variance_low[0]; break;
1337 case BLOCK_128X64:
1338 assert((mi_col & 0x1F) == 0);
1339 force_skip_low_temp_var = variance_low[1 + ((mi_row & 0x1F) != 0)];
1340 break;
1341 case BLOCK_64X128:
1342 assert((mi_row & 0x1F) == 0);
1343 force_skip_low_temp_var = variance_low[3 + ((mi_col & 0x1F) != 0)];
1344 break;
1345 case BLOCK_64X64:
1346 // Location of this 64x64 block inside the 128x128 superblock
1347 force_skip_low_temp_var = variance_low[5 + idx64];
1348 break;
1349 case BLOCK_64X32:
1350 x = (mi_col & 0x1F) >> 4;
1351 y = (mi_row & 0x1F) >> 3;
1352 /*
1353 .---------------.---------------.
1354 | x=0,y=0,idx=0 | x=1,y=0,idx=2 |
1355 :---------------+---------------:
1356 | x=0,y=1,idx=1 | x=1,y=1,idx=3 |
1357 :---------------+---------------:
1358 | x=0,y=2,idx=4 | x=1,y=2,idx=6 |
1359 :---------------+---------------:
1360 | x=0,y=3,idx=5 | x=1,y=3,idx=7 |
1361 '---------------'---------------'
1362 */
1363 const int idx64x32 = (x << 1) + (y % 2) + ((y >> 1) << 2);
1364 force_skip_low_temp_var = variance_low[9 + idx64x32];
1365 break;
1366 case BLOCK_32X64:
1367 x = (mi_col & 0x1F) >> 3;
1368 y = (mi_row & 0x1F) >> 4;
1369 const int idx32x64 = (y << 2) + x;
1370 force_skip_low_temp_var = variance_low[17 + idx32x64];
1371 break;
1372 case BLOCK_32X32:
1373 force_skip_low_temp_var = variance_low[25 + (idx64 << 2) + idx32];
1374 break;
1375 case BLOCK_32X16:
1376 case BLOCK_16X32:
1377 case BLOCK_16X16:
1378 force_skip_low_temp_var =
1379 variance_low[41 + (idx64 << 4) + (idx32 << 2) + idx16];
1380 break;
1381 default: break;
1382 }
1383 return force_skip_low_temp_var;
1384 }
1385
1386 #define FILTER_SEARCH_SIZE 2
1387 /*!\brief Searches for the best interpolation filter
1388 *
1389 * \ingroup nonrd_mode_search
1390 * \callgraph
1391 * \callergraph
1392 * Iterates through subset of possible interpolation filters (currently
1393 * only EIGHTTAP_REGULAR and EIGHTTAP_SMOOTH in both directions) and selects
1394 * the one that gives lowest RD cost. RD cost is calculated using curvfit model
1395 *
1396 * \param[in] cpi Top-level encoder structure
1397 * \param[in] x Pointer to structure holding all the
1398 * data for the current macroblock
1399 * \param[in] this_rdc Pointer to calculated RD Cost
1400 * \param[in] mi_row Row index in 4x4 units
1401 * \param[in] mi_col Column index in 4x4 units
1402 * \param[in] tmp Pointer to a temporary buffer for
1403 * prediction re-use
1404 * \param[in] bsize Current block size
1405 * \param[in] reuse_inter_pred Flag, indicating prediction re-use
1406 * \param[out] this_mode_pred Pointer to store prediction buffer
1407 * for prediction re-use
1408 * \param[out] this_early_term Flag, indicating that transform can be
1409 * skipped
1410 * \param[in] use_model_yrd_large Flag, indicating special logic to handle
1411 * large blocks
1412 *
1413 * \return Nothing is returned. Instead, calculated RD cost is placed to
1414 * \c this_rdc and best filter is placed to \c mi->interp_filters. In case
1415 * \c reuse_inter_pred flag is set, this function also outputs
1416 * \c this_mode_pred. Also \c this_early_term is set if transform can be
1417 * skipped
1418 */
1419 static void search_filter_ref(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *this_rdc,
1420 int mi_row, int mi_col, PRED_BUFFER *tmp,
1421 BLOCK_SIZE bsize, int reuse_inter_pred,
1422 PRED_BUFFER **this_mode_pred,
1423 int *this_early_term, int use_model_yrd_large) {
1424 AV1_COMMON *const cm = &cpi->common;
1425 MACROBLOCKD *const xd = &x->e_mbd;
1426 struct macroblockd_plane *const pd = &xd->plane[0];
1427 MB_MODE_INFO *const mi = xd->mi[0];
1428 const int bw = block_size_wide[bsize];
1429 RD_STATS pf_rd_stats[FILTER_SEARCH_SIZE] = { 0 };
1430 TX_SIZE pf_tx_size[FILTER_SEARCH_SIZE] = { 0 };
1431 PRED_BUFFER *current_pred = *this_mode_pred;
1432 int best_skip = 0;
1433 int best_early_term = 0;
1434 int64_t best_cost = INT64_MAX;
1435 int best_filter_index = -1;
1436 InterpFilter filters[FILTER_SEARCH_SIZE] = { EIGHTTAP_REGULAR,
1437 EIGHTTAP_SMOOTH };
1438 int i;
1439 for (i = 0; i < FILTER_SEARCH_SIZE; ++i) {
1440 int64_t cost;
1441 InterpFilter filter = filters[i];
1442 mi->interp_filters = av1_broadcast_interp_filter(filter);
1443 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1444 if (use_model_yrd_large)
1445 model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd,
1446 &pf_rd_stats[i], this_early_term, 1);
1447 else
1448 model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rd_stats[i], 1);
1449 pf_rd_stats[i].rate +=
1450 av1_get_switchable_rate(x, xd, cm->features.interp_filter);
1451 cost = RDCOST(x->rdmult, pf_rd_stats[i].rate, pf_rd_stats[i].dist);
1452 pf_tx_size[i] = mi->tx_size;
1453 if (cost < best_cost) {
1454 best_filter_index = i;
1455 best_cost = cost;
1456 best_skip = pf_rd_stats[i].skip_txfm;
1457 best_early_term = *this_early_term;
1458 if (reuse_inter_pred) {
1459 if (*this_mode_pred != current_pred) {
1460 free_pred_buffer(*this_mode_pred);
1461 *this_mode_pred = current_pred;
1462 }
1463 current_pred = &tmp[get_pred_buffer(tmp, 3)];
1464 pd->dst.buf = current_pred->data;
1465 pd->dst.stride = bw;
1466 }
1467 }
1468 }
1469 assert(best_filter_index >= 0 && best_filter_index < FILTER_SEARCH_SIZE);
1470 if (reuse_inter_pred && *this_mode_pred != current_pred)
1471 free_pred_buffer(current_pred);
1472
1473 mi->interp_filters = av1_broadcast_interp_filter(filters[best_filter_index]);
1474 mi->tx_size = pf_tx_size[best_filter_index];
1475 this_rdc->rate = pf_rd_stats[best_filter_index].rate;
1476 this_rdc->dist = pf_rd_stats[best_filter_index].dist;
1477 this_rdc->sse = pf_rd_stats[best_filter_index].sse;
1478 this_rdc->skip_txfm = (best_skip || best_early_term);
1479 *this_early_term = best_early_term;
1480 if (reuse_inter_pred) {
1481 pd->dst.buf = (*this_mode_pred)->data;
1482 pd->dst.stride = (*this_mode_pred)->stride;
1483 } else if (best_filter_index < FILTER_SEARCH_SIZE - 1) {
1484 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
1485 }
1486 }
1487
1488 #define COLLECT_PICK_MODE_STAT 0
1489
1490 #if COLLECT_PICK_MODE_STAT
1491 typedef struct _mode_search_stat {
1492 int32_t num_blocks[BLOCK_SIZES];
1493 int64_t avg_block_times[BLOCK_SIZES];
1494 int32_t num_searches[BLOCK_SIZES][MB_MODE_COUNT];
1495 int32_t num_nonskipped_searches[BLOCK_SIZES][MB_MODE_COUNT];
1496 int64_t search_times[BLOCK_SIZES][MB_MODE_COUNT];
1497 int64_t nonskipped_search_times[BLOCK_SIZES][MB_MODE_COUNT];
1498 struct aom_usec_timer timer1;
1499 struct aom_usec_timer timer2;
1500 } mode_search_stat;
1501 #endif // COLLECT_PICK_MODE_STAT
1502
1503 static void compute_intra_yprediction(const AV1_COMMON *cm,
1504 PREDICTION_MODE mode, BLOCK_SIZE bsize,
1505 MACROBLOCK *x, MACROBLOCKD *xd) {
1506 struct macroblockd_plane *const pd = &xd->plane[0];
1507 struct macroblock_plane *const p = &x->plane[0];
1508 uint8_t *const src_buf_base = p->src.buf;
1509 uint8_t *const dst_buf_base = pd->dst.buf;
1510 const int src_stride = p->src.stride;
1511 const int dst_stride = pd->dst.stride;
1512 int plane = 0;
1513 int row, col;
1514 // Step over the block in units of the largest square transform size that
1515 // fits it (row/col below are in 4x4 units). Only the luma plane (plane 0)
1516 // is predicted here.
1517 const TX_SIZE tx_size = max_txsize_lookup[bsize];
1518 const BLOCK_SIZE plane_bsize =
1519 get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);
1520 // If mb_to_right_edge is < 0 we are in a situation in which
1521 // the current block size extends into the UMV and we won't
1522 // visit the sub blocks that are wholly within the UMV.
1523 const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
1524 const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
1525 // Keep track of the row and column of the blocks we use so that we know
1526 // if we are in the unrestricted motion border.
1527 for (row = 0; row < max_blocks_high; row += (1 << tx_size)) {
1528 // Skip visiting the sub blocks that are wholly within the UMV.
1529 for (col = 0; col < max_blocks_wide; col += (1 << tx_size)) {
1530 p->src.buf = &src_buf_base[4 * (row * (int64_t)src_stride + col)];
1531 pd->dst.buf = &dst_buf_base[4 * (row * (int64_t)dst_stride + col)];
1532 av1_predict_intra_block(cm, xd, block_size_wide[bsize],
1533 block_size_high[bsize], tx_size, mode, 0, 0,
1534 FILTER_INTRA_MODES, pd->dst.buf, dst_stride,
1535 pd->dst.buf, dst_stride, 0, 0, plane);
1536 }
1537 }
1538 p->src.buf = src_buf_base;
1539 pd->dst.buf = dst_buf_base;
1540 }
1541
1542 void av1_nonrd_pick_intra_mode(AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *rd_cost,
1543 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1544 AV1_COMMON *const cm = &cpi->common;
1545 MACROBLOCKD *const xd = &x->e_mbd;
1546 MB_MODE_INFO *const mi = xd->mi[0];
1547 RD_STATS this_rdc, best_rdc;
1548 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1549 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1550 const TX_SIZE intra_tx_size =
1551 AOMMIN(max_txsize_lookup[bsize],
1552 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]);
1553 int *bmode_costs;
1554 const MB_MODE_INFO *above_mi = xd->above_mbmi;
1555 const MB_MODE_INFO *left_mi = xd->left_mbmi;
1556 const PREDICTION_MODE A = av1_above_block_mode(above_mi);
1557 const PREDICTION_MODE L = av1_left_block_mode(left_mi);
1558 bmode_costs = x->mode_costs.y_mode_costs[A][L];
1559
1560 av1_invalid_rd_stats(&best_rdc);
1561 av1_invalid_rd_stats(&this_rdc);
1562
1563 init_mbmi(mi, DC_PRED, INTRA_FRAME, NONE_FRAME, cm);
1564 mi->mv[0].as_int = mi->mv[1].as_int = INVALID_MV;
1565
1566 // Change the limit of this loop to add other intra prediction
1567 // mode tests.
1568 for (int i = 0; i < 4; ++i) {
1569 PREDICTION_MODE this_mode = intra_mode_list[i];
1570 this_rdc.dist = this_rdc.rate = 0;
1571 args.mode = this_mode;
1572 args.skippable = 1;
1573 args.rdc = &this_rdc;
1574 mi->tx_size = intra_tx_size;
1575 av1_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
1576 &args);
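// If every transform block is skippable, the rate reduces to the skip flag
// cost; otherwise the no-skip flag cost is added on top of the coefficient
// rate.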
1577 if (args.skippable) {
1578 this_rdc.rate = av1_cost_symbol(av1_get_skip_txfm_cdf(xd)[1]);
1579 } else {
1580 this_rdc.rate += av1_cost_symbol(av1_get_skip_txfm_cdf(xd)[0]);
1581 }
1582 this_rdc.rate += bmode_costs[this_mode];
1583 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
1584
1585 if (this_rdc.rdcost < best_rdc.rdcost) {
1586 best_rdc = this_rdc;
1587 mi->mode = this_mode;
1588 }
1589 }
1590
1591 *rd_cost = best_rdc;
1592
1593 #if CONFIG_INTERNAL_STATS
1594 store_coding_context(x, ctx, mi->mode);
1595 #else
1596 store_coding_context(x, ctx);
1597 #endif // CONFIG_INTERNAL_STATS
1598 }
1599
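// Returns 1 if the LAST and GOLDEN reference frames use identical scale
// factors, i.e. both are scaled (or unscaled) in the same way.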
1600 static AOM_INLINE int is_same_gf_and_last_scale(AV1_COMMON *cm) {
1601 struct scale_factors *const sf_last = get_ref_scale_factors(cm, LAST_FRAME);
1602 struct scale_factors *const sf_golden =
1603 get_ref_scale_factors(cm, GOLDEN_FRAME);
1604 return ((sf_last->x_scale_fp == sf_golden->x_scale_fp) &&
1605 (sf_last->y_scale_fp == sf_golden->y_scale_fp));
1606 }
1607
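// Decides which reference frames (LAST, GOLDEN, ALTREF) should be considered
// for the non-RD mode search, based on speed features, segmentation and the
// low-temporal-variance heuristics.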
1608 static AOM_INLINE void get_ref_frame_use_mask(AV1_COMP *cpi, MACROBLOCK *x,
1609 MB_MODE_INFO *mi, int mi_row,
1610 int mi_col, int bsize,
1611 int gf_temporal_ref,
1612 int use_ref_frame[],
1613 int *force_skip_low_temp_var) {
1614 AV1_COMMON *const cm = &cpi->common;
1615 const struct segmentation *const seg = &cm->seg;
1616 const int is_small_sb = (cm->seq_params.sb_size == BLOCK_64X64);
1617
1618 // For SVC the usage of alt_ref is determined by the ref_frame_flags.
1619 int use_alt_ref_frame = cpi->use_svc || cpi->sf.rt_sf.use_nonrd_altref_frame;
1620 int use_golden_ref_frame = 1;
1621
1622 use_ref_frame[LAST_FRAME] = 1; // we never skip LAST
1623
1624 if (cpi->rc.frames_since_golden == 0 && gf_temporal_ref) {
1625 use_golden_ref_frame = 0;
1626 }
1627
1628 if (cpi->sf.rt_sf.short_circuit_low_temp_var &&
1629 x->nonrd_prune_ref_frame_search) {
1630 if (is_small_sb)
1631 *force_skip_low_temp_var = get_force_skip_low_temp_var_small_sb(
1632 &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
1633 else
1634 *force_skip_low_temp_var = get_force_skip_low_temp_var(
1635 &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
1636 // If force_skip_low_temp_var is set, skip the golden and altref references.
1637 if (*force_skip_low_temp_var) {
1638 use_golden_ref_frame = 0;
1639 use_alt_ref_frame = 0;
1640 }
1641 }
1642
1643 if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
1644 get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) == GOLDEN_FRAME) {
1645 use_golden_ref_frame = 1;
1646 use_alt_ref_frame = 0;
1647 }
1648
1649 use_alt_ref_frame =
1650 cpi->ref_frame_flags & AOM_ALT_FLAG ? use_alt_ref_frame : 0;
1651 use_golden_ref_frame =
1652 cpi->ref_frame_flags & AOM_GOLD_FLAG ? use_golden_ref_frame : 0;
1653
1654 use_ref_frame[ALTREF_FRAME] = use_alt_ref_frame;
1655 use_ref_frame[GOLDEN_FRAME] = use_golden_ref_frame;
1656 }
1657
1658 /*!\brief Estimates best intra mode for inter mode search
1659 *
1660 * \ingroup nonrd_mode_search
1661 * \callgraph
1662 * \callergraph
1663 *
1664 * Using heuristics based on the best inter mode, block size, and other
1665 * parameters, decides whether to check intra modes. If so, estimates and
1666 * selects the best intra mode from a reduced set (at most 4 modes checked)
1667 *
1668 * \param[in] cpi Top-level encoder structure
1669 * \param[in] x Pointer to structure holding all the
1670 * data for the current macroblock
1671 * \param[in] bsize Current block size
1672 * \param[in] use_modeled_non_rd_cost Flag, indicating usage of curvfit
1673 * model for RD cost
1674 * \param[in] best_early_term Flag, indicating that TX for the
1675 * best inter mode was skipped
1676 * \param[in] ref_cost_intra Cost of signalling intra mode
1677 * \param[in] reuse_prediction Flag, indicating prediction re-use
1678 * \param[in] orig_dst Original destination buffer
1679 * \param[in] tmp_buffers Pointer to a temporary buffers for
1680 * prediction re-use
1681 * \param[out] this_mode_pred Pointer to store prediction buffer
1682 * for prediction re-use
1683 * \param[in,out] best_rdc Pointer to the RD cost of the best
1684 * mode selected so far
1685 * \param[in,out] best_pickmode Pointer to a structure containing the
1686 * best mode picked so far
1687 *
1688 * \return Nothing is returned. Instead, the calculated RD cost is placed in
1689 * \c best_rdc and the best selected mode is placed in \c best_pickmode
1690 */
1691 static void estimate_intra_mode(
1692 AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int use_modeled_non_rd_cost,
1693 int best_early_term, unsigned int ref_cost_intra, int reuse_prediction,
1694 struct buf_2d *orig_dst, PRED_BUFFER *tmp_buffers,
1695 PRED_BUFFER **this_mode_pred, RD_STATS *best_rdc,
1696 BEST_PICKMODE *best_pickmode) {
1697 AV1_COMMON *const cm = &cpi->common;
1698 MACROBLOCKD *const xd = &x->e_mbd;
1699 MB_MODE_INFO *const mi = xd->mi[0];
1700 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1701 const unsigned char segment_id = mi->segment_id;
1702 const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
1703 const int *const rd_thresh_freq_fact = x->thresh_freq_fact[bsize];
1704 const int mi_row = xd->mi_row;
1705 const int mi_col = xd->mi_col;
1706 struct macroblockd_plane *const pd = &xd->plane[0];
1707
1708 const CommonQuantParams *quant_params = &cm->quant_params;
1709
1710 RD_STATS this_rdc;
1711
1712 int intra_cost_penalty = av1_get_intra_cost_penalty(
1713 quant_params->base_qindex, quant_params->y_dc_delta_q,
1714 cm->seq_params.bit_depth);
1715 int64_t inter_mode_thresh = RDCOST(x->rdmult, intra_cost_penalty, 0);
1716 int perform_intra_pred = cpi->sf.rt_sf.check_intra_pred_nonrd;
1717
1718 int do_early_exit_rdthresh = 1;
1719
1720 uint32_t spatial_var_thresh = 50;
1721 int motion_thresh = 32;
1722 // Adjust thresholds to make intra mode more likely to be tested if the
1723 // other references (golden, alt) are skipped/not checked. For now, always
1724 // adjust for SVC mode.
1725 if (cpi->use_svc || (cpi->sf.rt_sf.use_nonrd_altref_frame == 0 &&
1726 cpi->sf.rt_sf.nonrd_prune_ref_frame_search > 0)) {
1727 spatial_var_thresh = 150;
1728 motion_thresh = 0;
1729 }
1730
1731 // Some adjustments to checking intra mode based on source variance.
1732 if (x->source_variance < spatial_var_thresh) {
1733 // If the best inter mode has large motion or a non-LAST reference, reduce
1734 // the intra cost penalty so intra mode is more likely to be tested.
1735 if (best_pickmode->best_ref_frame != LAST_FRAME ||
1736 abs(mi->mv[0].as_mv.row) >= motion_thresh ||
1737 abs(mi->mv[0].as_mv.col) >= motion_thresh) {
1738 intra_cost_penalty = intra_cost_penalty >> 2;
1739 inter_mode_thresh = RDCOST(x->rdmult, intra_cost_penalty, 0);
1740 do_early_exit_rdthresh = 0;
1741 }
1742 // For big blocks it is worth checking intra (since only DC will be
1743 // checked), even if best_early_term is set.
1744 if (bsize >= BLOCK_32X32) best_early_term = 0;
1745 } else if (cpi->sf.rt_sf.source_metrics_sb_nonrd &&
1746 x->content_state_sb == kLowSad) {
1747 perform_intra_pred = 0;
1748 }
1749
1750 if (cpi->sf.rt_sf.skip_intra_pred_if_tx_skip && best_rdc->skip_txfm &&
1751 best_pickmode->best_mode_initial_skip_flag) {
1752 perform_intra_pred = 0;
1753 }
1754
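// Evaluate intra modes only if no inter mode has been found yet, or if intra
// search is enabled, the best inter mode did not early-terminate, its RD cost
// exceeds the intra threshold, and the block is small enough.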
1755 if (!(best_rdc->rdcost == INT64_MAX ||
1756 (perform_intra_pred && !best_early_term &&
1757 best_rdc->rdcost > inter_mode_thresh &&
1758 bsize <= cpi->sf.part_sf.max_intra_bsize))) {
1759 return;
1760 }
1761
1762 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1763 TX_SIZE intra_tx_size = AOMMIN(
1764 AOMMIN(max_txsize_lookup[bsize],
1765 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]),
1766 TX_16X16);
1767
1768 PRED_BUFFER *const best_pred = best_pickmode->best_pred;
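// If the best inter prediction currently lives in the destination buffer,
// copy it to a scratch buffer so the intra predictions below do not
// overwrite it.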
1769 if (reuse_prediction && best_pred != NULL) {
1770 const int bh = block_size_high[bsize];
1771 const int bw = block_size_wide[bsize];
1772 if (best_pred->data == orig_dst->buf) {
1773 *this_mode_pred = &tmp_buffers[get_pred_buffer(tmp_buffers, 3)];
1774 aom_convolve_copy(best_pred->data, best_pred->stride,
1775 (*this_mode_pred)->data, (*this_mode_pred)->stride, bw,
1776 bh);
1777 best_pickmode->best_pred = *this_mode_pred;
1778 }
1779 }
1780 pd->dst = *orig_dst;
1781
1782 for (int i = 0; i < 4; ++i) {
1783 const PREDICTION_MODE this_mode = intra_mode_list[i];
1784 const THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
1785 const int mode_rd_thresh = rd_threshes[mode_index];
1786
1787 if (!((1 << this_mode) & cpi->sf.rt_sf.intra_y_mode_bsize_mask_nrd[bsize]))
1788 continue;
1789
1790 if (rd_less_than_thresh(best_rdc->rdcost, mode_rd_thresh,
1791 rd_thresh_freq_fact[mode_index]) &&
1792 (do_early_exit_rdthresh || this_mode == SMOOTH_PRED)) {
1793 continue;
1794 }
1795 const BLOCK_SIZE uv_bsize = get_plane_block_size(
1796 bsize, xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
1797
1798 mi->mode = this_mode;
1799 mi->ref_frame[0] = INTRA_FRAME;
1800 mi->ref_frame[1] = NONE_FRAME;
1801
1802 av1_invalid_rd_stats(&this_rdc);
1803 args.mode = this_mode;
1804 args.skippable = 1;
1805 args.rdc = &this_rdc;
1806 mi->tx_size = intra_tx_size;
1807 compute_intra_yprediction(cm, this_mode, bsize, x, xd);
1808 // Look into selecting tx_size here, based on prediction residual.
1809 if (use_modeled_non_rd_cost)
1810 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc, 1);
1811 else
1812 block_yrd(cpi, x, mi_row, mi_col, &this_rdc, &args.skippable, bsize,
1813 mi->tx_size);
1814 // TODO(kyslov@) Need to account for skippable
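// If chroma is flagged as color-sensitive, also add the estimated U/V intra
// cost for this mode.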
1815 if (x->color_sensitivity[0]) {
1816 av1_foreach_transformed_block_in_plane(xd, uv_bsize, 1,
1817 estimate_block_intra, &args);
1818 }
1819 if (x->color_sensitivity[1]) {
1820 av1_foreach_transformed_block_in_plane(xd, uv_bsize, 2,
1821 estimate_block_intra, &args);
1822 }
1823
1824 int mode_cost = 0;
1825 if (av1_is_directional_mode(this_mode) && av1_use_angle_delta(bsize)) {
1826 mode_cost +=
1827 x->mode_costs.angle_delta_cost[this_mode - V_PRED]
1828 [MAX_ANGLE_DELTA +
1829 mi->angle_delta[PLANE_TYPE_Y]];
1830 }
1831 if (this_mode == DC_PRED && av1_filter_intra_allowed_bsize(cm, bsize)) {
1832 mode_cost += x->mode_costs.filter_intra_cost[bsize][0];
1833 }
1834 this_rdc.rate += ref_cost_intra;
1835 this_rdc.rate += intra_cost_penalty;
1836 this_rdc.rate += mode_cost;
1837 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
1838
1839 if (this_rdc.rdcost < best_rdc->rdcost) {
1840 *best_rdc = this_rdc;
1841 best_pickmode->best_mode = this_mode;
1842 best_pickmode->best_tx_size = mi->tx_size;
1843 best_pickmode->best_ref_frame = INTRA_FRAME;
1844 mi->uv_mode = this_mode;
1845 mi->mv[0].as_int = INVALID_MV;
1846 mi->mv[1].as_int = INVALID_MV;
1847 }
1848 }
1849 mi->tx_size = best_pickmode->best_tx_size;
1850 }
1851
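// Returns 1 if interpolation filter search should be performed for this
// block. With chessboard-based prediction filter search, the decision
// alternates spatially and from frame to frame.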
1852 static AOM_INLINE int is_filter_search_enabled(const AV1_COMP *cpi, int mi_row,
1853 int mi_col, BLOCK_SIZE bsize) {
1854 const AV1_COMMON *const cm = &cpi->common;
1855 int enable_filter_search = 0;
1856
1857 if (cpi->sf.rt_sf.use_nonrd_filter_search) {
1858 enable_filter_search = 1;
1859 if (cpi->sf.interp_sf.cb_pred_filter_search) {
1860 const int bsl = mi_size_wide_log2[bsize];
1861 enable_filter_search =
1862 (((mi_row + mi_col) >> bsl) +
1863 get_chessboard_index(cm->current_frame.frame_number)) &
1864 0x1;
1865 }
1866 }
1867 return enable_filter_search;
1868 }
1869
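// Returns 1 if the given mode/reference combination should be skipped based
// on RD thresholds scaled per reference frame; zero-MV candidates are never
// skipped here.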
1870 static AOM_INLINE int skip_mode_by_threshold(
1871 PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, int_mv mv,
1872 int frames_since_golden, const int *const rd_threshes,
1873 const int *const rd_thresh_freq_fact, int64_t best_cost, int best_skip) {
1874 int skip_this_mode = 0;
1875 const THR_MODES mode_index = mode_idx[ref_frame][INTER_OFFSET(mode)];
1876 int mode_rd_thresh =
1877 best_skip ? rd_threshes[mode_index] << 1 : rd_threshes[mode_index];
1878
1879 // Increase the mode_rd_thresh value for non-LAST references for improved
1880 // encoding speed.
1881 if (ref_frame != LAST_FRAME) {
1882 mode_rd_thresh = mode_rd_thresh << 1;
1883 if (ref_frame == GOLDEN_FRAME && frames_since_golden > 4)
1884 mode_rd_thresh = mode_rd_thresh << 1;
1885 }
1886
1887 if (rd_less_than_thresh(best_cost, mode_rd_thresh,
1888 rd_thresh_freq_fact[mode_index]))
1889 if (mv.as_int != 0) skip_this_mode = 1;
1890
1891 return skip_this_mode;
1892 }
1893
1894 static AOM_INLINE int skip_mode_by_low_temp(PREDICTION_MODE mode,
1895 MV_REFERENCE_FRAME ref_frame,
1896 BLOCK_SIZE bsize,
1897 int content_state_sb, int_mv mv,
1898 int force_skip_low_temp_var) {
1899 // Skip non-zeromv mode search for non-LAST frame if force_skip_low_temp_var
1900 // is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
1901 // later.
1902 if (force_skip_low_temp_var && ref_frame != LAST_FRAME && mv.as_int != 0) {
1903 return 1;
1904 }
1905
1906 if (content_state_sb != kHighSad && bsize >= BLOCK_64X64 &&
1907 force_skip_low_temp_var && mode == NEWMV) {
1908 return 1;
1909 }
1910 return 0;
1911 }
1912
1913 static AOM_INLINE int skip_mode_by_bsize_and_ref_frame(
1914 PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, BLOCK_SIZE bsize,
1915 int extra_prune, unsigned int sse_zeromv_norm) {
1916 const unsigned int thresh_skip_golden = 500;
1917
1918 if (ref_frame != LAST_FRAME && sse_zeromv_norm < thresh_skip_golden &&
1919 mode == NEWMV)
1920 return 1;
1921
1922 if (bsize == BLOCK_128X128 && mode == NEWMV) return 1;
1923
1924 // Skip testing non-LAST if this flag is set.
1925 if (extra_prune) {
1926 if (extra_prune > 1 && ref_frame != LAST_FRAME &&
1927 (bsize > BLOCK_64X64 || (bsize > BLOCK_16X16 && mode == NEWMV)))
1928 return 1;
1929
1930 if (ref_frame != LAST_FRAME && mode == NEARMV) return 1;
1931 }
1932 return 0;
1933 }
1934
1935 void av1_nonrd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
1936 MACROBLOCK *x, RD_STATS *rd_cost,
1937 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1938 AV1_COMMON *const cm = &cpi->common;
1939 MACROBLOCKD *const xd = &x->e_mbd;
1940 MB_MODE_INFO *const mi = xd->mi[0];
1941 struct macroblockd_plane *const pd = &xd->plane[0];
1942
1943 BEST_PICKMODE best_pickmode;
1944 #if COLLECT_PICK_MODE_STAT
1945 static mode_search_stat ms_stat;
1946 #endif
1947 MV_REFERENCE_FRAME ref_frame;
1948 int_mv frame_mv[MB_MODE_COUNT][REF_FRAMES];
1949 uint8_t mode_checked[MB_MODE_COUNT][REF_FRAMES];
1950 struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE];
1951 RD_STATS this_rdc, best_rdc;
1952 const unsigned char segment_id = mi->segment_id;
1953 const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
1954 const int *const rd_thresh_freq_fact = x->thresh_freq_fact[bsize];
1955 const InterpFilter filter_ref = cm->features.interp_filter;
1956 int best_early_term = 0;
1957 unsigned int ref_costs_single[REF_FRAMES],
1958 ref_costs_comp[REF_FRAMES][REF_FRAMES];
1959 int force_skip_low_temp_var = 0;
1960 int use_ref_frame_mask[REF_FRAMES] = { 0 };
1961 unsigned int sse_zeromv_norm = UINT_MAX;
1962 int num_inter_modes = RT_INTER_MODES;
1963 PRED_BUFFER tmp[4];
1964 DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 128 * 128]);
1965 PRED_BUFFER *this_mode_pred = NULL;
1966 const int reuse_inter_pred =
1967 cpi->sf.rt_sf.reuse_inter_pred_nonrd && cm->seq_params.bit_depth == 8;
1968 const int bh = block_size_high[bsize];
1969 const int bw = block_size_wide[bsize];
1970 const int pixels_in_block = bh * bw;
1971 struct buf_2d orig_dst = pd->dst;
1972 const CommonQuantParams *quant_params = &cm->quant_params;
1973 const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1974 TxfmSearchInfo *txfm_info = &x->txfm_search_info;
1975 #if COLLECT_PICK_MODE_STAT
1976 aom_usec_timer_start(&ms_stat.timer2);
1977 #endif
1978 const InterpFilter default_interp_filter = EIGHTTAP_REGULAR;
1979 int64_t thresh_sad_pred = INT64_MAX;
1980 const int mi_row = xd->mi_row;
1981 const int mi_col = xd->mi_col;
1982 int use_modeled_non_rd_cost = 0;
1983
1984 init_best_pickmode(&best_pickmode);
1985
1986 const ModeCosts *mode_costs = &x->mode_costs;
1987
1988 estimate_single_ref_frame_costs(cm, xd, mode_costs, segment_id,
1989 ref_costs_single);
1990 if (cpi->sf.rt_sf.use_comp_ref_nonrd)
1991 estimate_comp_ref_frame_costs(cm, xd, mode_costs, segment_id,
1992 ref_costs_comp);
1993
1994 memset(&mode_checked[0][0], 0, MB_MODE_COUNT * REF_FRAMES);
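// Set up the scratch prediction buffers used when re-using inter predictions:
// three buffers carved out of pred_buf plus the actual destination buffer as
// the fourth entry.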
1995 if (reuse_inter_pred) {
1996 for (int i = 0; i < 3; i++) {
1997 tmp[i].data = &pred_buf[pixels_in_block * i];
1998 tmp[i].stride = bw;
1999 tmp[i].in_use = 0;
2000 }
2001 tmp[3].data = pd->dst.buf;
2002 tmp[3].stride = pd->dst.stride;
2003 tmp[3].in_use = 0;
2004 }
2005
2006 txfm_info->skip_txfm = 0;
2007
2008 // initialize mode decisions
2009 av1_invalid_rd_stats(&best_rdc);
2010 av1_invalid_rd_stats(&this_rdc);
2011 av1_invalid_rd_stats(rd_cost);
2012 mi->bsize = bsize;
2013 mi->ref_frame[0] = NONE_FRAME;
2014 mi->ref_frame[1] = NONE_FRAME;
2015
2016 const int gf_temporal_ref = is_same_gf_and_last_scale(cm);
2017
2018 get_ref_frame_use_mask(cpi, x, mi, mi_row, mi_col, bsize, gf_temporal_ref,
2019 use_ref_frame_mask, &force_skip_low_temp_var);
2020
2021 for (MV_REFERENCE_FRAME ref_frame_iter = LAST_FRAME;
2022 ref_frame_iter <= ALTREF_FRAME; ++ref_frame_iter) {
2023 if (use_ref_frame_mask[ref_frame_iter]) {
2024 find_predictors(cpi, x, ref_frame_iter, frame_mv, tile_data, yv12_mb,
2025 bsize, force_skip_low_temp_var);
2026 }
2027 }
2028
2029 thresh_sad_pred = ((int64_t)x->pred_mv_sad[LAST_FRAME]) << 1;
2030 // Increase the threshold for less aggressive pruning.
2031 if (cpi->sf.rt_sf.nonrd_prune_ref_frame_search == 1)
2032 thresh_sad_pred += (x->pred_mv_sad[LAST_FRAME] >> 2);
2033
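// Restrict the large-block skip model to 8-bit CBR encoding on blocks whose
// segment is not boosted by cyclic refresh and when the base qindex is
// non-zero.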
2034 const int large_block = bsize >= BLOCK_32X32;
2035 const int use_model_yrd_large =
2036 cpi->oxcf.rc_cfg.mode == AOM_CBR && large_block &&
2037 !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
2038 quant_params->base_qindex && cm->seq_params.bit_depth == 8;
2039
2040 const int enable_filter_search =
2041 is_filter_search_enabled(cpi, mi_row, mi_col, bsize);
2042
2043 // TODO(marpan): Look into reducing these conditions. For now constrain
2044 // it to avoid significant bdrate loss.
2045 if (cpi->sf.rt_sf.use_modeled_non_rd_cost) {
2046 if (cpi->svc.non_reference_frame)
2047 use_modeled_non_rd_cost = 1;
2048 else if (cpi->svc.number_temporal_layers > 1 &&
2049 cpi->svc.temporal_layer_id == 0)
2050 use_modeled_non_rd_cost = 0;
2051 else
2052 use_modeled_non_rd_cost =
2053 (quant_params->base_qindex > 120 && x->source_variance > 100 &&
2054 bsize <= BLOCK_16X16 && x->content_state_sb != kLowVarHighSumdiff &&
2055 x->content_state_sb != kHighSad);
2056 }
2057
2058 #if COLLECT_PICK_MODE_STAT
2059 ms_stat.num_blocks[bsize]++;
2060 #endif
2061 init_mbmi(mi, DC_PRED, NONE_FRAME, NONE_FRAME, cm);
2062 mi->tx_size = AOMMIN(
2063 AOMMIN(max_txsize_lookup[bsize],
2064 tx_mode_to_biggest_tx_size[txfm_params->tx_mode_search_type]),
2065 TX_16X16);
2066
2067 for (int idx = 0; idx < num_inter_modes; ++idx) {
2068 const struct segmentation *const seg = &cm->seg;
2069
2070 int rate_mv = 0;
2071 int is_skippable;
2072 int this_early_term = 0;
2073 int skip_this_mv = 0;
2074 PREDICTION_MODE this_mode;
2075 MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
2076 RD_STATS nonskip_rdc;
2077 av1_invalid_rd_stats(&nonskip_rdc);
2078
2079 this_mode = ref_mode_set[idx].pred_mode;
2080 ref_frame = ref_mode_set[idx].ref_frame;
2081
2082 #if COLLECT_PICK_MODE_STAT
2083 aom_usec_timer_start(&ms_stat.timer1);
2084 ms_stat.num_searches[bsize][this_mode]++;
2085 #endif
2086 mi->mode = this_mode;
2087 mi->ref_frame[0] = ref_frame;
2088
2089 if (!use_ref_frame_mask[ref_frame]) continue;
2090
2091 // For SVC, skip non-zero motion if skip_nonzeromv_last/gf is set for the ref.
2092 if (cpi->use_svc && frame_mv[this_mode][ref_frame].as_int != 0) {
2093 if (ref_frame == LAST_FRAME && cpi->svc.skip_nonzeromv_last)
2094 continue;
2095 else if (ref_frame == GOLDEN_FRAME && cpi->svc.skip_nonzeromv_gf)
2096 continue;
2097 }
2098
2099 // If the segment reference frame feature is enabled then do nothing if the
2100 // current ref frame is not allowed.
2101 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
2102 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
2103 continue;
2104
2105 if (skip_mode_by_bsize_and_ref_frame(this_mode, ref_frame, bsize,
2106 x->nonrd_prune_ref_frame_search,
2107 sse_zeromv_norm))
2108 continue;
2109
2110 if (skip_mode_by_low_temp(this_mode, ref_frame, bsize, x->content_state_sb,
2111 frame_mv[this_mode][ref_frame],
2112 force_skip_low_temp_var))
2113 continue;
2114
2115 // Disable this drop out case if the ref frame segment level feature is
2116 // enabled for this segment. This is to prevent the possibility that we
2117 // end up unable to pick any mode.
2118 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
2119 // Check for skipping GOLDEN and ALTREF based on pred_mv_sad.
2120 if (cpi->sf.rt_sf.nonrd_prune_ref_frame_search > 0 &&
2121 x->pred_mv_sad[ref_frame] != INT_MAX && ref_frame != LAST_FRAME) {
2122 if ((int64_t)(x->pred_mv_sad[ref_frame]) > thresh_sad_pred) continue;
2123 }
2124 }
2125
2126 if (skip_mode_by_threshold(
2127 this_mode, ref_frame, frame_mv[this_mode][ref_frame],
2128 cpi->rc.frames_since_golden, rd_threshes, rd_thresh_freq_fact,
2129 best_rdc.rdcost, best_pickmode.best_mode_skip_txfm))
2130 continue;
2131
2132 // Select prediction reference frames.
2133 for (int i = 0; i < MAX_MB_PLANE; i++) {
2134 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
2135 }
2136
2137 mi->ref_frame[0] = ref_frame;
2138 mi->ref_frame[1] = NONE_FRAME;
2139 set_ref_ptrs(cm, xd, ref_frame, NONE_FRAME);
2140
2141 if (this_mode == NEWMV) {
2142 if (search_new_mv(cpi, x, frame_mv, ref_frame, gf_temporal_ref, bsize,
2143 mi_row, mi_col, &rate_mv, &best_rdc))
2144 continue;
2145 }
2146
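// Skip this mode if a previously tested mode produced the same motion vector
// for this reference frame.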
2147 for (PREDICTION_MODE inter_mv_mode = NEARESTMV; inter_mv_mode <= NEWMV;
2148 inter_mv_mode++) {
2149 if (inter_mv_mode == this_mode) continue;
2150 if (mode_checked[inter_mv_mode][ref_frame] &&
2151 frame_mv[this_mode][ref_frame].as_int ==
2152 frame_mv[inter_mv_mode][ref_frame].as_int) {
2153 skip_this_mv = 1;
2154 break;
2155 }
2156 }
2157
2158 if (skip_this_mv) continue;
2159
2160 mi->mode = this_mode;
2161 mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
2162 mi->mv[1].as_int = 0;
2163 if (reuse_inter_pred) {
2164 if (!this_mode_pred) {
2165 this_mode_pred = &tmp[3];
2166 } else {
2167 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
2168 pd->dst.buf = this_mode_pred->data;
2169 pd->dst.stride = bw;
2170 }
2171 }
2172 #if COLLECT_PICK_MODE_STAT
2173 ms_stat.num_nonskipped_searches[bsize][this_mode]++;
2174 #endif
2175 if (enable_filter_search &&
2176 ((mi->mv[0].as_mv.row & 0x07) || (mi->mv[0].as_mv.col & 0x07)) &&
2177 (ref_frame == LAST_FRAME || !x->nonrd_prune_ref_frame_search)) {
2178 search_filter_ref(cpi, x, &this_rdc, mi_row, mi_col, tmp, bsize,
2179 reuse_inter_pred, &this_mode_pred, &this_early_term,
2180 use_model_yrd_large);
2181 } else {
2182 mi->interp_filters =
2183 (filter_ref == SWITCHABLE)
2184 ? av1_broadcast_interp_filter(default_interp_filter)
2185 : av1_broadcast_interp_filter(filter_ref);
2186 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
2187 if (use_model_yrd_large) {
2188 model_skip_for_sb_y_large(cpi, bsize, mi_row, mi_col, x, xd, &this_rdc,
2189 &this_early_term, use_modeled_non_rd_cost);
2190 } else {
2191 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc,
2192 use_modeled_non_rd_cost);
2193 }
2194 }
2195
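// Record the zero-MV LAST-frame SSE, normalized by the number of 4x4 units in
// the block; later iterations use it to prune NEWMV for non-LAST references.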
2196 if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0) {
2197 sse_zeromv_norm =
2198 (unsigned int)(this_rdc.sse >> (b_width_log2_lookup[bsize] +
2199 b_height_log2_lookup[bsize]));
2200 }
2201
2202 const int skip_ctx = av1_get_skip_txfm_context(xd);
2203 const int skip_txfm_cost = mode_costs->skip_txfm_cost[skip_ctx][1];
2204 const int no_skip_txfm_cost = mode_costs->skip_txfm_cost[skip_ctx][0];
2205 if (this_early_term) {
2206 this_rdc.skip_txfm = 1;
2207 this_rdc.rate = skip_txfm_cost;
2208 this_rdc.dist = this_rdc.sse << 4;
2209 } else {
2210 if (use_modeled_non_rd_cost) {
2211 if (this_rdc.skip_txfm) {
2212 this_rdc.rate = skip_txfm_cost;
2213 } else {
2214 this_rdc.rate += no_skip_txfm_cost;
2215 }
2216 } else {
2217 block_yrd(cpi, x, mi_row, mi_col, &this_rdc, &is_skippable, bsize,
2218 mi->tx_size);
2219 if (this_rdc.skip_txfm ||
2220 RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist) >=
2221 RDCOST(x->rdmult, 0, this_rdc.sse)) {
2222 if (!this_rdc.skip_txfm) {
2223 // Need to store the "real" rdc for possible future use if the UV rdc
2224 // disallows tx skip
2225 nonskip_rdc = this_rdc;
2226 nonskip_rdc.rate += no_skip_txfm_cost;
2227 }
2228 this_rdc.rate = skip_txfm_cost;
2229 this_rdc.skip_txfm = 1;
2230 this_rdc.dist = this_rdc.sse;
2231 } else {
2232 this_rdc.rate += no_skip_txfm_cost;
2233 }
2234 }
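// When chroma is color-sensitive, build the U/V inter predictors and fold
// their modeled RD contribution into this mode's cost.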
2235 if ((x->color_sensitivity[0] || x->color_sensitivity[1])) {
2236 RD_STATS rdc_uv;
2237 const BLOCK_SIZE uv_bsize = get_plane_block_size(
2238 bsize, xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
2239 if (x->color_sensitivity[0]) {
2240 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
2241 AOM_PLANE_U, AOM_PLANE_U);
2242 }
2243 if (x->color_sensitivity[1]) {
2244 av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
2245 AOM_PLANE_V, AOM_PLANE_V);
2246 }
2247 model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &this_rdc.sse, 1, 2);
2248 // Restore Y rdc if UV rdc disallows txfm skip
2249 if (this_rdc.skip_txfm && !rdc_uv.skip_txfm &&
2250 nonskip_rdc.rate != INT_MAX)
2251 this_rdc = nonskip_rdc;
2252 this_rdc.rate += rdc_uv.rate;
2253 this_rdc.dist += rdc_uv.dist;
2254 this_rdc.skip_txfm = this_rdc.skip_txfm && rdc_uv.skip_txfm;
2255 }
2256 }
2257
2258 // TODO(kyslov) account for UV prediction cost
2259 this_rdc.rate += rate_mv;
2260 const int16_t mode_ctx =
2261 av1_mode_context_analyzer(mbmi_ext->mode_context, mi->ref_frame);
2262 this_rdc.rate += cost_mv_ref(mode_costs, this_mode, mode_ctx);
2263
2264 this_rdc.rate += ref_costs_single[ref_frame];
2265
2266 this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist);
2267 if (cpi->oxcf.rc_cfg.mode == AOM_CBR) {
2268 newmv_diff_bias(xd, this_mode, &this_rdc, bsize,
2269 frame_mv[this_mode][ref_frame].as_mv.row,
2270 frame_mv[this_mode][ref_frame].as_mv.col, cpi->speed,
2271 x->source_variance, x->content_state_sb);
2272 }
2273
2274 mode_checked[this_mode][ref_frame] = 1;
2275 #if COLLECT_PICK_MODE_STAT
2276 aom_usec_timer_mark(&ms_stat.timer1);
2277 ms_stat.nonskipped_search_times[bsize][this_mode] +=
2278 aom_usec_timer_elapsed(&ms_stat.timer1);
2279 #endif
2280 if (this_rdc.rdcost < best_rdc.rdcost) {
2281 best_rdc = this_rdc;
2282 best_early_term = this_early_term;
2283 best_pickmode.best_mode = this_mode;
2284 best_pickmode.best_pred_filter = mi->interp_filters;
2285 best_pickmode.best_tx_size = mi->tx_size;
2286 best_pickmode.best_ref_frame = ref_frame;
2287 best_pickmode.best_mode_skip_txfm = this_rdc.skip_txfm;
2288 best_pickmode.best_mode_initial_skip_flag =
2289 (nonskip_rdc.rate == INT_MAX && this_rdc.skip_txfm);
2290
2291 if (reuse_inter_pred) {
2292 free_pred_buffer(best_pickmode.best_pred);
2293 best_pickmode.best_pred = this_mode_pred;
2294 }
2295 } else {
2296 if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
2297 }
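// If the transform for the best mode so far could be skipped (early
// termination), end the inter mode search.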
2298 if (best_early_term && idx > 0) {
2299 txfm_info->skip_txfm = 1;
2300 break;
2301 }
2302 }
2303
2304 mi->mode = best_pickmode.best_mode;
2305 mi->interp_filters = best_pickmode.best_pred_filter;
2306 mi->tx_size = best_pickmode.best_tx_size;
2307 memset(mi->inter_tx_size, mi->tx_size, sizeof(mi->inter_tx_size));
2308 mi->ref_frame[0] = best_pickmode.best_ref_frame;
2309 mi->mv[0].as_int =
2310 frame_mv[best_pickmode.best_mode][best_pickmode.best_ref_frame].as_int;
2311
2312 // Perform the intra prediction search if the best inter RD cost is above a
2313 // certain threshold.
2314 mi->angle_delta[PLANE_TYPE_Y] = 0;
2315 mi->angle_delta[PLANE_TYPE_UV] = 0;
2316 mi->filter_intra_mode_info.use_filter_intra = 0;
2317
2318 estimate_intra_mode(cpi, x, bsize, use_modeled_non_rd_cost, best_early_term,
2319 ref_costs_single[INTRA_FRAME], reuse_inter_pred,
2320 &orig_dst, tmp, &this_mode_pred, &best_rdc,
2321 &best_pickmode);
2322
2323 pd->dst = orig_dst;
2324 mi->mode = best_pickmode.best_mode;
2325 mi->ref_frame[0] = best_pickmode.best_ref_frame;
2326 txfm_info->skip_txfm = best_rdc.skip_txfm;
2327
2328 if (!is_inter_block(mi)) {
2329 mi->interp_filters = av1_broadcast_interp_filter(SWITCHABLE_FILTERS);
2330 }
2331
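// If an inter mode won and its best prediction lives in a scratch buffer,
// copy it back into the actual destination buffer.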
2332 if (reuse_inter_pred && best_pickmode.best_pred != NULL) {
2333 PRED_BUFFER *const best_pred = best_pickmode.best_pred;
2334 if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
2335 aom_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
2336 pd->dst.stride, bw, bh);
2337 }
2338 }
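// Update the adaptive RD threshold frequency factors for the winning mode so
// that subsequent blocks can prune the mode search more effectively.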
2339 if (cpi->sf.inter_sf.adaptive_rd_thresh) {
2340 THR_MODES best_mode_idx =
2341 mode_idx[best_pickmode.best_ref_frame][mode_offset(mi->mode)];
2342 if (best_pickmode.best_ref_frame == INTRA_FRAME) {
2343 // Only consider the modes that are included in the intra_mode_list.
2344 int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
2345 for (int i = 0; i < intra_modes; i++) {
2346 update_thresh_freq_fact(cpi, x, bsize, INTRA_FRAME, best_mode_idx,
2347 intra_mode_list[i]);
2348 }
2349 } else {
2350 PREDICTION_MODE this_mode;
2351 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
2352 update_thresh_freq_fact(cpi, x, bsize, best_pickmode.best_ref_frame,
2353 best_mode_idx, this_mode);
2354 }
2355 }
2356 }
2357
2358 #if CONFIG_INTERNAL_STATS
2359 store_coding_context(x, ctx, mi->mode);
2360 #else
2361 store_coding_context(x, ctx);
2362 #endif // CONFIG_INTERNAL_STATS
2363 #if COLLECT_PICK_MODE_STAT
2364 aom_usec_timer_mark(&ms_stat.timer2);
2365 ms_stat.avg_block_times[bsize] += aom_usec_timer_elapsed(&ms_stat.timer2);
2366 //
2367 if ((mi_row + mi_size_high[bsize] >= (cpi->common.mi_params.mi_rows)) &&
2368 (mi_col + mi_size_wide[bsize] >= (cpi->common.mi_params.mi_cols))) {
2369 int i, j;
2370 PREDICTION_MODE used_modes[3] = { NEARESTMV, NEARMV, NEWMV };
2371 BLOCK_SIZE bss[5] = { BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
2372 BLOCK_128X128 };
2373 int64_t total_time = 0l;
2374 int32_t total_blocks = 0;
2375
2376 printf("\n");
2377 for (i = 0; i < 5; i++) {
2378 printf("BS(%d) Num %d, Avg_time %f: ", bss[i], ms_stat.num_blocks[bss[i]],
2379 ms_stat.num_blocks[bss[i]] > 0
2380 ? (float)ms_stat.avg_block_times[bss[i]] /
2381 ms_stat.num_blocks[bss[i]]
2382 : 0);
2383 total_time += ms_stat.avg_block_times[bss[i]];
2384 total_blocks += ms_stat.num_blocks[bss[i]];
2385 for (j = 0; j < 3; j++) {
2386 printf("Mode %d, %d/%d tps %f ", used_modes[j],
2387 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]],
2388 ms_stat.num_searches[bss[i]][used_modes[j]],
2389 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]] > 0
2390 ? (float)ms_stat
2391 .nonskipped_search_times[bss[i]][used_modes[j]] /
2392 ms_stat.num_nonskipped_searches[bss[i]][used_modes[j]]
2393 : 0l);
2394 }
2395 printf("\n");
2396 }
2397 printf("Total time = %ld. Total blocks = %d\n", total_time, total_blocks);
2398 }
2399 //
2400 #endif // COLLECT_PICK_MODE_STAT
2401 *rd_cost = best_rdc;
2402 }
2403