1 /*
2 * Copyright (c) 2019, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 /*!\defgroup gf_group_algo Golden Frame Group
13 * \ingroup high_level_algo
14 * Algorithms regarding determining the length of GF groups and defining GF
15 * group structures.
16 * @{
17 */
18 /*! @} - end defgroup gf_group_algo */
19
20 #include <stdint.h>
21
22 #include "av1/encoder/thirdpass.h"
23 #include "config/aom_config.h"
24 #include "config/aom_scale_rtcd.h"
25
26 #include "aom/aom_codec.h"
27 #include "aom/aom_encoder.h"
28
29 #include "av1/common/av1_common_int.h"
30
31 #include "av1/encoder/encoder.h"
32 #include "av1/encoder/firstpass.h"
33 #include "av1/encoder/gop_structure.h"
34 #include "av1/encoder/pass2_strategy.h"
35 #include "av1/encoder/ratectrl.h"
36 #include "av1/encoder/rc_utils.h"
37 #include "av1/encoder/temporal_filter.h"
38 #include "av1/encoder/tpl_model.h"
39 #include "av1/encoder/encode_strategy.h"
40
41 #define DEFAULT_KF_BOOST 2300
42 #define DEFAULT_GF_BOOST 2000
43 #define GROUP_ADAPTIVE_MAXQ 1
44
45 static void init_gf_stats(GF_GROUP_STATS *gf_stats);
46
47 // Calculate an active area of the image that discounts formatting
48 // bars and partially discounts other 0 energy areas.
49 #define MIN_ACTIVE_AREA 0.5
50 #define MAX_ACTIVE_AREA 1.0
calculate_active_area(const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame)51 static double calculate_active_area(const FRAME_INFO *frame_info,
52 const FIRSTPASS_STATS *this_frame) {
53 const double active_pct =
54 1.0 -
55 ((this_frame->intra_skip_pct / 2) +
56 ((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows));
57 return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
58 }
59
60 // Calculate a modified Error used in distributing bits between easier and
61 // harder frames.
62 #define ACT_AREA_CORRECTION 0.5
calculate_modified_err_new(const FRAME_INFO * frame_info,const FIRSTPASS_STATS * total_stats,const FIRSTPASS_STATS * this_stats,int vbrbias,double modified_error_min,double modified_error_max)63 static double calculate_modified_err_new(const FRAME_INFO *frame_info,
64 const FIRSTPASS_STATS *total_stats,
65 const FIRSTPASS_STATS *this_stats,
66 int vbrbias, double modified_error_min,
67 double modified_error_max) {
68 if (total_stats == NULL) {
69 return 0;
70 }
71 const double av_weight = total_stats->weight / total_stats->count;
72 const double av_err =
73 (total_stats->coded_error * av_weight) / total_stats->count;
74 double modified_error =
75 av_err * pow(this_stats->coded_error * this_stats->weight /
76 DOUBLE_DIVIDE_CHECK(av_err),
77 vbrbias / 100.0);
78
79 // Correction for active area. Frames with a reduced active area
80 // (eg due to formatting bars) have a higher error per mb for the
81 // remaining active MBs. The correction here assumes that coding
82 // 0.5N blocks of complexity 2X is a little easier than coding N
83 // blocks of complexity X.
84 modified_error *=
85 pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);
86
87 return fclamp(modified_error, modified_error_min, modified_error_max);
88 }
89
calculate_modified_err(const FRAME_INFO * frame_info,const TWO_PASS * twopass,const AV1EncoderConfig * oxcf,const FIRSTPASS_STATS * this_frame)90 static double calculate_modified_err(const FRAME_INFO *frame_info,
91 const TWO_PASS *twopass,
92 const AV1EncoderConfig *oxcf,
93 const FIRSTPASS_STATS *this_frame) {
94 const FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
95 return calculate_modified_err_new(
96 frame_info, total_stats, this_frame, oxcf->rc_cfg.vbrbias,
97 twopass->modified_error_min, twopass->modified_error_max);
98 }
99
100 // Resets the first pass file to the given position using a relative seek from
101 // the current position.
// Resets the first pass stats read pointer to the supplied position.
static void reset_fpf_position(TWO_PASS_FRAME *p_frame,
                               const FIRSTPASS_STATS *position) {
  p_frame->stats_in = position;
}
106
// Copy the next first pass stats packet into *fps and advance the read
// position. Returns EOF when no more stats are available, 1 otherwise.
static int input_stats(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                       FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;
  *fps = *(p_frame->stats_in++);
  return 1;
}
115
// Look-ahead-processing variant of input_stats(): after the front packet is
// consumed, the remaining packets are shifted down one slot so the buffer
// can accept the stats of the next incoming frame.
static int input_stats_lap(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                           FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  *fps = *p_frame->stats_in;
  // Number of packets that remain beyond the one just consumed.
  const size_t remaining =
      p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1;
  // Move old stats[0] out to accommodate the next frame's stats.
  memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
          remaining * sizeof(FIRSTPASS_STATS));
  p->stats_buf_ctx->stats_in_end--;
  return 1;
}
128
129 // Read frame stats at an offset from the current position.
read_frame_stats(const TWO_PASS * p,const TWO_PASS_FRAME * p_frame,int offset)130 static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p,
131 const TWO_PASS_FRAME *p_frame,
132 int offset) {
133 if ((offset >= 0 &&
134 p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
135 (offset < 0 &&
136 p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) {
137 return NULL;
138 }
139
140 return &p_frame->stats_in[offset];
141 }
142
143 // This function returns the maximum target rate per frame.
frame_max_bits(const RATE_CONTROL * rc,const AV1EncoderConfig * oxcf)144 static int frame_max_bits(const RATE_CONTROL *rc,
145 const AV1EncoderConfig *oxcf) {
146 int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
147 (int64_t)oxcf->rc_cfg.vbrmax_section) /
148 100;
149 if (max_bits < 0)
150 max_bits = 0;
151 else if (max_bits > rc->max_frame_bandwidth)
152 max_bits = rc->max_frame_bandwidth;
153
154 return (int)max_bits;
155 }
156
157 static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75,
158 0.80, 0.85, 0.90,
159 0.95, 0.95, 0.95 };
160 #define ERR_DIVISOR 96.0
calc_correction_factor(double err_per_mb,int q)161 static double calc_correction_factor(double err_per_mb, int q) {
162 const double error_term = err_per_mb / ERR_DIVISOR;
163 const int index = q >> 5;
164 // Adjustment to power term based on qindex
165 const double power_term =
166 q_pow_term[index] +
167 (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
168 assert(error_term >= 0.0);
169 return fclamp(pow(error_term, power_term), 0.05, 5.0);
170 }
171
172 // Based on history adjust expectations of bits per macroblock.
// Based on recent rate control history, adjust the running expectation of
// bits per macroblock (twopass->bpm_factor).
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
  TWO_PASS *twopass = &cpi->ppi->twopass;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  const double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0);
  // Allowed per-call adjustment range shrinks as the tolerance grows.
  const double adj_limit = AOMMAX(0.20, (double)(100 - rate_err_tol) / 200.0);
  const double min_fac = 1.0 - adj_limit;
  const double max_fac = 1.0 + adj_limit;
  const int err_estimate = p_rc->rate_error_estimate;
  double rate_err_factor = 1.0;

  if (p_rc->vbr_bits_off_target && p_rc->total_actual_bits > 0) {
    if (cpi->ppi->lap_enabled) {
      // 1-pass look-ahead: compare the recent ARF group's actual bits to
      // its target.
      rate_err_factor =
          (double)twopass->rolling_arf_group_actual_bits /
          DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits);
    } else {
      rate_err_factor =
          1.0 - ((double)(p_rc->vbr_bits_off_target) /
                 AOMMAX(p_rc->total_actual_bits, cpi->ppi->twopass.bits_left));
    }

    rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));

    // Damp the adjustment if this is 1 pass with look ahead processing (as
    // there are only ever a few frames of data) and for all but the first
    // GOP in normal two pass.
    if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) {
      rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac);
    }
  }

  // Only adjust when rate control is trending in the wrong direction.
  if ((rate_err_factor < 1.0 && err_estimate > 0) ||
      (rate_err_factor > 1.0 && err_estimate < 0)) {
    twopass->bpm_factor =
        AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor * rate_err_factor));
  }
}
214
// Numerator of the q -> bits-per-mb model, scaled up linearly as the rate
// error tolerance rises through the range 25..100.
static int qbpm_enumerator(int rate_err_tol) {
  int excess = rate_err_tol - 25;
  if (excess < 0) excess = 0;
  if (excess > 75) excess = 75;
  return 1200000 + (300000 * excess) / 75;
}
218
219 // Similar to find_qindex_by_rate() function in ratectrl.c, but includes
220 // calculation of a correction_factor.
// Binary search for the qindex whose predicted bits per mb, including the
// error-based correction factor, best matches the desired rate. Similar to
// find_qindex_by_rate() in ratectrl.c but with the correction applied.
static int find_qindex_by_rate_with_correction(
    int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb,
    double group_weight_factor, int rate_err_tol, int best_qindex,
    int worst_qindex) {
  assert(best_qindex <= worst_qindex);
  // Loop-invariant: depends only on the tolerance.
  const int enumerator = qbpm_enumerator(rate_err_tol);
  int low = best_qindex;
  int high = worst_qindex;

  while (low < high) {
    const int mid = (low + high) >> 1;
    const double mid_factor = calc_correction_factor(error_per_mb, mid);
    const double q = av1_convert_qindex_to_q(mid, bit_depth);
    const int mid_bits_per_mb =
        (int)((enumerator * mid_factor * group_weight_factor) / q);

    // Too many bits at this q: the target rate lies at a higher qindex.
    if (mid_bits_per_mb > desired_bits_per_mb)
      low = mid + 1;
    else
      high = mid;
  }
  return low;
}
245
246 /*!\brief Choose a target maximum Q for a group of frames
247 *
248 * \ingroup rate_control
249 *
250 * This function is used to estimate a suitable maximum Q for a
 * group of frames. Initially it is called to get a crude estimate
252 * for the whole clip. It is then called for each ARF/GF group to get
253 * a revised estimate for that group.
254 *
255 * \param[in] cpi Top-level encoder structure
256 * \param[in] av_frame_err The average per frame coded error score
257 * for frames making up this section/group.
258 * \param[in] inactive_zone Used to mask off /ignore part of the
259 * frame. The most common use case is where
260 * a wide format video (e.g. 16:9) is
261 * letter-boxed into a more square format.
262 * Here we want to ignore the bands at the
263 * top and bottom.
264 * \param[in] av_target_bandwidth The target bits per frame
265 *
266 * \return The maximum Q for frames in the group.
267 */
static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
                                     double inactive_zone,
                                     int av_target_bandwidth) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;

  // No bits available: return the highest value allowed.
  if (av_target_bandwidth <= 0) return rc->worst_quality;

  inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);

  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
  const double av_err_per_mb = av_frame_err / (1.0 - inactive_zone);
  const int target_norm_bits_per_mb =
      (int)((uint64_t)av_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
  const int rate_err_tol =
      AOMMIN(rc_cfg->under_shoot_pct, rc_cfg->over_shoot_pct);

  // Update the bits-per-mb correction factor from the previous GOP's rate
  // error before estimating the new q.
  twopass_update_bpm_factor(cpi, rate_err_tol);

  // Pick a max Q high enough to encode the content at the given rate.
  int q = find_qindex_by_rate_with_correction(
      target_norm_bits_per_mb, cpi->common.seq_params->bit_depth, av_err_per_mb,
      cpi->ppi->twopass.bpm_factor, rate_err_tol, rc->best_quality,
      rc->worst_quality);

  // Constrained quality mode places a floor on the active max q.
  if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
  return q;
}
303
304 #define INTRA_PART 0.005
305 #define DEFAULT_DECAY_LIMIT 0.75
306 #define LOW_SR_DIFF_TRHESH 0.01
307 #define NCOUNT_FRAME_II_THRESH 5.0
308 #define LOW_CODED_ERR_PER_MB 0.01
309
310 /* This function considers how the quality of prediction may be deteriorating
 * with distance. It compares the coded error for the last frame and the
312 * second reference frame (usually two frames old) and also applies a factor
313 * based on the extent of INTRA coding.
314 *
315 * The decay factor is then used to reduce the contribution of frames further
316 * from the alt-ref or golden frame, to the bitframe boost calculation for that
317 * alt-ref or golden frame.
318 */
get_sr_decay_rate(const FIRSTPASS_STATS * frame)319 static double get_sr_decay_rate(const FIRSTPASS_STATS *frame) {
320 double sr_diff = (frame->sr_coded_error - frame->coded_error);
321 double sr_decay = 1.0;
322 double modified_pct_inter;
323 double modified_pcnt_intra;
324
325 modified_pct_inter = frame->pcnt_inter;
326 if ((frame->coded_error > LOW_CODED_ERR_PER_MB) &&
327 ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
328 (double)NCOUNT_FRAME_II_THRESH)) {
329 modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
330 }
331 modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
332
333 if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
334 double sr_diff_part = ((sr_diff * 0.25) / frame->intra_error);
335 sr_decay = 1.0 - sr_diff_part - (INTRA_PART * modified_pcnt_intra);
336 }
337 return AOMMAX(sr_decay, DEFAULT_DECAY_LIMIT);
338 }
339
340 // This function gives an estimate of how badly we believe the prediction
341 // quality is decaying from frame to frame.
get_zero_motion_factor(const FIRSTPASS_STATS * frame)342 static double get_zero_motion_factor(const FIRSTPASS_STATS *frame) {
343 const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
344 double sr_decay = get_sr_decay_rate(frame);
345 return AOMMIN(sr_decay, zero_motion_pct);
346 }
347
348 #define DEFAULT_ZM_FACTOR 0.5
get_prediction_decay_rate(const FIRSTPASS_STATS * frame_stats)349 static double get_prediction_decay_rate(const FIRSTPASS_STATS *frame_stats) {
350 const double sr_decay_rate = get_sr_decay_rate(frame_stats);
351 double zero_motion_factor =
352 DEFAULT_ZM_FACTOR * (frame_stats->pcnt_inter - frame_stats->pcnt_motion);
353
354 // Clamp value to range 0.0 to 1.0
355 // This should happen anyway if input values are sensibly clamped but checked
356 // here just in case.
357 if (zero_motion_factor > 1.0)
358 zero_motion_factor = 1.0;
359 else if (zero_motion_factor < 0.0)
360 zero_motion_factor = 0.0;
361
362 return AOMMAX(zero_motion_factor,
363 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
364 }
365
366 // Function to test for a condition where a complex transition is followed
367 // by a static section. For example in slide shows where there is a fade
368 // between slides. This is to help with more optimal kf and gf positioning.
detect_transition_to_still(const FIRSTPASS_INFO * firstpass_info,int next_stats_index,const int min_gf_interval,const int frame_interval,const int still_interval,const double loop_decay_rate,const double last_decay_rate)369 static int detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info,
370 int next_stats_index,
371 const int min_gf_interval,
372 const int frame_interval,
373 const int still_interval,
374 const double loop_decay_rate,
375 const double last_decay_rate) {
376 // Break clause to detect very still sections after motion
377 // For example a static image after a fade or other transition
378 // instead of a clean scene cut.
379 if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
380 last_decay_rate < 0.9) {
381 int stats_left =
382 av1_firstpass_info_future_count(firstpass_info, next_stats_index);
383 if (stats_left >= still_interval) {
384 int j;
385 // Look ahead a few frames to see if static condition persists...
386 for (j = 0; j < still_interval; ++j) {
387 const FIRSTPASS_STATS *stats =
388 av1_firstpass_info_peek(firstpass_info, next_stats_index + j);
389 if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
390 }
391 // Only if it does do we signal a transition to still.
392 return j == still_interval;
393 }
394 }
395 return 0;
396 }
397
398 // This function detects a flash through the high relative pcnt_second_ref
399 // score in the frame following a flash frame. The offset passed in should
400 // reflect this.
detect_flash(const TWO_PASS * twopass,const TWO_PASS_FRAME * twopass_frame,const int offset)401 static int detect_flash(const TWO_PASS *twopass,
402 const TWO_PASS_FRAME *twopass_frame, const int offset) {
403 const FIRSTPASS_STATS *const next_frame =
404 read_frame_stats(twopass, twopass_frame, offset);
405
406 // What we are looking for here is a situation where there is a
407 // brief break in prediction (such as a flash) but subsequent frames
408 // are reasonably well predicted by an earlier (pre flash) frame.
409 // The recovery after a flash is indicated by a high pcnt_second_ref
410 // compared to pcnt_inter.
411 return next_frame != NULL &&
412 next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
413 next_frame->pcnt_second_ref >= 0.5;
414 }
415
416 // Update the motion related elements to the GF arf boost calculation.
// Fold one frame's motion related stats into the GF/arf boost accumulators.
static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
                                          GF_GROUP_STATS *gf_stats, double f_w,
                                          double f_h) {
  const double pct = stats->pcnt_motion;

  // Motion in/out of frame.
  const double mv_in_out = stats->mv_in_out_count * pct;
  gf_stats->this_frame_mv_in_out = mv_in_out;
  gf_stats->mv_in_out_accumulator += mv_in_out;
  gf_stats->abs_mv_in_out_accumulator += fabs(mv_in_out);

  // Measure how uniform (or conversely how random) the motion field is
  // via the ratio abs(mv) / mv per component, weighted by motion pct.
  if (pct > 0.05) {
    const double mvr_ratio =
        fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
    const double mvc_ratio =
        fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
    const double mvr_cap = stats->mvr_abs * f_h;
    const double mvc_cap = stats->mvc_abs * f_w;

    gf_stats->mv_ratio_accumulator +=
        pct * (mvr_ratio < mvr_cap ? mvr_ratio : mvr_cap);
    gf_stats->mv_ratio_accumulator +=
        pct * (mvc_ratio < mvc_cap ? mvc_ratio : mvc_cap);
  }
}
443
// Fold the current frame's error and activity stats into the GF group
// running totals.
static void accumulate_this_frame_stats(const FIRSTPASS_STATS *stats,
                                        const double mod_frame_err,
                                        GF_GROUP_STATS *gf_stats) {
  gf_stats->gf_group_err += mod_frame_err;
#if GROUP_ADAPTIVE_MAXQ
  gf_stats->gf_group_raw_error += stats->coded_error;
#endif
  gf_stats->gf_group_skip_pct += stats->intra_skip_pct;
  gf_stats->gf_group_inactive_zone_rows += stats->inactive_zone_rows;
}
454
// Fold the next frame's stats into the GF group accumulators and track the
// prediction quality decay (flash frames are excluded from the decay).
static void accumulate_next_frame_stats(const FIRSTPASS_STATS *stats,
                                        const int flash_detected,
                                        const int frames_since_key,
                                        const int cur_idx,
                                        GF_GROUP_STATS *gf_stats, int f_w,
                                        int f_h) {
  accumulate_frame_motion_stats(stats, gf_stats, f_w, f_h);

  // Sum up the metric values of the current gf group.
  gf_stats->avg_sr_coded_error += stats->sr_coded_error;
  gf_stats->avg_pcnt_second_ref += stats->pcnt_second_ref;
  gf_stats->avg_new_mv_count += stats->new_mv_count;
  gf_stats->avg_wavelet_energy += stats->frame_avg_wavelet_energy;
  if (fabs(stats->raw_error_stdev) > 0.000001) {
    gf_stats->non_zero_stdev_count++;
    gf_stats->avg_raw_err_stdev += stats->raw_error_stdev;
  }

  // Flash frames give misleading decay measurements, so skip them.
  if (flash_detected) return;

  gf_stats->last_loop_decay_rate = gf_stats->loop_decay_rate;
  gf_stats->loop_decay_rate = get_prediction_decay_rate(stats);
  gf_stats->decay_accumulator *= gf_stats->loop_decay_rate;

  // Monitor for static sections.
  if ((frames_since_key + cur_idx - 1) > 1) {
    const double zm = get_zero_motion_factor(stats);
    if (zm < gf_stats->zero_motion_accumulator)
      gf_stats->zero_motion_accumulator = zm;
  }
}
487
// Convert the accumulated GF group sums into per-frame averages.
static void average_gf_stats(const int total_frame, GF_GROUP_STATS *gf_stats) {
  if (total_frame != 0) {
    gf_stats->avg_sr_coded_error /= total_frame;
    gf_stats->avg_pcnt_second_ref /= total_frame;
    gf_stats->avg_new_mv_count /= total_frame;
    gf_stats->avg_wavelet_energy /= total_frame;
  }

  if (gf_stats->non_zero_stdev_count != 0)
    gf_stats->avg_raw_err_stdev /= gf_stats->non_zero_stdev_count;
}
499
500 #define BOOST_FACTOR 12.5
baseline_err_per_mb(const FRAME_INFO * frame_info)501 static double baseline_err_per_mb(const FRAME_INFO *frame_info) {
502 unsigned int screen_area = frame_info->frame_height * frame_info->frame_width;
503
504 // Use a different error per mb factor for calculating boost for
505 // different formats.
506 if (screen_area <= 640 * 360) {
507 return 500.0;
508 } else {
509 return 1000.0;
510 }
511 }
512
calc_frame_boost(const PRIMARY_RATE_CONTROL * p_rc,const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame,double this_frame_mv_in_out,double max_boost)513 static double calc_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
514 const FRAME_INFO *frame_info,
515 const FIRSTPASS_STATS *this_frame,
516 double this_frame_mv_in_out, double max_boost) {
517 double frame_boost;
518 const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
519 frame_info->bit_depth);
520 const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
521 const double active_area = calculate_active_area(frame_info, this_frame);
522
523 // Underlying boost factor is based on inter error ratio.
524 frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
525 this_frame->intra_error * active_area) /
526 DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
527 frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;
528
529 // Increase boost for frames where new data coming into frame (e.g. zoom out).
530 // Slightly reduce boost if there is a net balance of motion out of the frame
531 // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0.
532 if (this_frame_mv_in_out > 0.0)
533 frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
534 // In the extreme case the boost is halved.
535 else
536 frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
537
538 return AOMMIN(frame_boost, max_boost * boost_q_correction);
539 }
540
calc_kf_frame_boost(const PRIMARY_RATE_CONTROL * p_rc,const FRAME_INFO * frame_info,const FIRSTPASS_STATS * this_frame,double * sr_accumulator,double max_boost)541 static double calc_kf_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
542 const FRAME_INFO *frame_info,
543 const FIRSTPASS_STATS *this_frame,
544 double *sr_accumulator, double max_boost) {
545 double frame_boost;
546 const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
547 frame_info->bit_depth);
548 const double boost_q_correction = AOMMIN((0.50 + (lq * 0.015)), 2.00);
549 const double active_area = calculate_active_area(frame_info, this_frame);
550
551 // Underlying boost factor is based on inter error ratio.
552 frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
553 this_frame->intra_error * active_area) /
554 DOUBLE_DIVIDE_CHECK(
555 (this_frame->coded_error + *sr_accumulator) * active_area);
556
557 // Update the accumulator for second ref error difference.
558 // This is intended to give an indication of how much the coded error is
559 // increasing over time.
560 *sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
561 *sr_accumulator = AOMMAX(0.0, *sr_accumulator);
562
563 // Q correction and scaling
564 // The 40.0 value here is an experimentally derived baseline minimum.
565 // This value is in line with the minimum per frame boost in the alt_ref
566 // boost calculation.
567 frame_boost = ((frame_boost + 40.0) * boost_q_correction);
568
569 return AOMMIN(frame_boost, max_boost * boost_q_correction);
570 }
571
get_projected_gfu_boost(const PRIMARY_RATE_CONTROL * p_rc,int gfu_boost,int frames_to_project,int num_stats_used_for_gfu_boost)572 static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
573 int gfu_boost, int frames_to_project,
574 int num_stats_used_for_gfu_boost) {
575 /*
576 * If frames_to_project is equal to num_stats_used_for_gfu_boost,
577 * it means that gfu_boost was calculated over frames_to_project to
578 * begin with(ie; all stats required were available), hence return
579 * the original boost.
580 */
581 if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;
582
583 double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
584 // Get the current tpl factor (number of frames = frames_to_project).
585 double tpl_factor = av1_get_gfu_boost_projection_factor(
586 min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
587 // Get the tpl factor when number of frames = num_stats_used_for_prior_boost.
588 double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
589 min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
590 int projected_gfu_boost =
591 (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
592 return projected_gfu_boost;
593 }
594
595 #define GF_MAX_BOOST 90.0
596 #define GF_MIN_BOOST 50
597 #define MIN_DECAY_FACTOR 0.01
// Accumulate the decayed boost contribution of up to |count| frames on one
// side of the proposed arf position. Frames are visited at stats offsets
// offset + start, offset + start + step, ...; the scan stops early when
// stats run out. When num_fpstats_used is non-NULL it is incremented once
// per frame actually visited.
static double accumulate_arf_boost(const TWO_PASS *twopass,
                                   const TWO_PASS_FRAME *twopass_frame,
                                   const PRIMARY_RATE_CONTROL *p_rc,
                                   FRAME_INFO *frame_info, int offset,
                                   int start, int step, int count,
                                   double initial_score,
                                   int *num_fpstats_used) {
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  double score = initial_score;
  int idx = start;

  for (int n = 0; n < count; ++n, idx += step) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, idx + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements of the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // Discount the flash frame itself and the recovery frame that follows,
    // as both will have poor scores.
    const int flash_detected =
        detect_flash(twopass, twopass_frame, idx + offset) ||
        detect_flash(twopass, twopass_frame, idx + offset + 1);

    // Cumulative effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      if (gf_stats.decay_accumulator < MIN_DECAY_FACTOR)
        gf_stats.decay_accumulator = MIN_DECAY_FACTOR;
    }

    score += gf_stats.decay_accumulator *
             calc_frame_boost(p_rc, frame_info, this_frame,
                              gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }
  return score;
}

// Calculate the boost for a proposed arf position: the sum of decayed per
// frame boosts scanning forward over f_frames and backward over b_frames,
// optionally projected up when fewer stats were available than requested.
int av1_calc_arf_boost(const TWO_PASS *twopass,
                       const TWO_PASS_FRAME *twopass_frame,
                       const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
                       int offset, int f_frames, int b_frames,
                       int *num_fpstats_used, int *num_fpstats_required,
                       int project_gfu_boost) {
  if (num_fpstats_used) *num_fpstats_used = 0;

  // Search forward from the proposed arf/next gf position.
  int arf_boost = (int)accumulate_arf_boost(
      twopass, twopass_frame, p_rc, frame_info, offset, 0, 1, f_frames,
      (double)NORMAL_BOOST, num_fpstats_used);

  // Search backward towards the last gf position.
  arf_boost += (int)accumulate_arf_boost(twopass, twopass_frame, p_rc,
                                         frame_info, offset, -1, -1, b_frames,
                                         0.0, num_fpstats_used);

  if (project_gfu_boost) {
    assert(num_fpstats_required != NULL);
    assert(num_fpstats_used != NULL);
    *num_fpstats_required = f_frames + b_frames;
    arf_boost = get_projected_gfu_boost(p_rc, arf_boost, *num_fpstats_required,
                                        *num_fpstats_used);
  }

  // Apply a per-frame minimum boost floor.
  const int min_boost = (b_frames + f_frames) * GF_MIN_BOOST;
  return (arf_boost < min_boost) ? min_boost : arf_boost;
}
693
694 // Calculate a section intra ratio used in setting max loop filter.
calculate_section_intra_ratio(const FIRSTPASS_STATS * begin,const FIRSTPASS_STATS * end,int section_length)695 static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
696 const FIRSTPASS_STATS *end,
697 int section_length) {
698 const FIRSTPASS_STATS *s = begin;
699 double intra_error = 0.0;
700 double coded_error = 0.0;
701 int i = 0;
702
703 while (s < end && i < section_length) {
704 intra_error += s->intra_error;
705 coded_error += s->coded_error;
706 ++s;
707 ++i;
708 }
709
710 return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
711 }
712
713 /*!\brief Calculates the bit target for this GF/ARF group
714 *
715 * \ingroup rate_control
716 *
717 * Calculates the total bits to allocate in this GF/ARF group.
718 *
719 * \param[in] cpi Top-level encoder structure
720 * \param[in] gf_group_err Cumulative coded error score for the
721 * frames making up this group.
722 *
723 * \return The target total number of bits for this GF/ARF group.
724 */
static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
                                             double gf_group_err) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const int max_bits = frame_max_bits(rc, &cpi->oxcf);
  int64_t total_group_bits = 0;

  // The group receives a share of the kf group bits proportional to its
  // share of the remaining kf group error.
  if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) {
    total_group_bits = (int64_t)(twopass->kf_group_bits *
                                 (gf_group_err / twopass->kf_group_error_left));
  }

  // Clamp odd edge cases.
  if (total_group_bits < 0)
    total_group_bits = 0;
  else if (total_group_bits > twopass->kf_group_bits)
    total_group_bits = twopass->kf_group_bits;

  // Clip based on the user supplied data rate variability limit.
  const int64_t group_cap = (int64_t)max_bits * p_rc->baseline_gf_interval;
  if (total_group_bits > group_cap) total_group_bits = group_cap;

  return total_group_bits;
}
754
// Calculate the number of bits to assign to boosted frames in a group.
static int calculate_boost_bits(int frame_count, int boost,
                                int64_t total_group_bits) {
  // return 0 for invalid inputs (could arise e.g. through rounding errors)
  if (!boost || (total_group_bits <= 0)) return 0;

  // No normal frames: the entire budget (capped at INT_MAX) is boosted bits.
  if (frame_count <= 0) return (int)(AOMMIN(total_group_bits, INT_MAX));

  // Each normal frame counts for 100 "chunks"; the boosted frame(s) get an
  // extra `boost` chunks on top of that.
  int allocation_chunks = (frame_count * 100) + boost;

  // Prevent overflow: scale boost and the chunk total down together so the
  // 64-bit multiply below stays in range.
  if (boost > 1023) {
    const int scale = boost >> 10;
    boost /= scale;
    allocation_chunks /= scale;
  }

  // Calculate the number of extra bits for use in the boosted frame or frames.
  return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
                0);
}
778
// Calculate the boost factor based on the number of bits assigned, i.e. the
// inverse of calculate_boost_bits().
static int calculate_boost_factor(int frame_count, int bits,
                                  int64_t total_group_bits) {
  // NOTE(review): assumes bits < total_group_bits; equality would divide by
  // zero -- confirm with callers.
  const int64_t remaining_bits = total_group_bits - bits;
  return (int)(100.0 * frame_count * bits / remaining_bits);
}
785
// Reduce the number of bits assigned to keyframe or arf if necessary, to
// prevent bitrate spikes that may break level constraints.
// frame_type: 0: keyframe; 1: arf.
// Returns the (possibly reduced) bits_assigned. Side effect: updates
// p_rc->kf_boost (keyframe) or p_rc->gfu_boost (arf) so the stored boost
// stays consistent with the capped bit budget.
static int adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
                                              RATE_CONTROL *const rc,
                                              int bits_assigned,
                                              int64_t group_bits,
                                              int frame_type) {
  const AV1_COMMON *const cm = &cpi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const int temporal_layer_id = cm->temporal_layer_id;
  const int spatial_layer_id = cm->spatial_layer_id;
  // Each operating point that contains the current spatial/temporal layer may
  // impose its own level-derived bitrate cap; apply them all in turn.
  for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
       ++index) {
    if (!is_in_operating_point(seq_params->operating_point_idc[index],
                               temporal_layer_id, spatial_layer_id)) {
      continue;
    }

    const AV1_LEVEL target_level =
        cpi->ppi->level_params.target_seq_level_idx[index];
    // Values >= SEQ_LEVELS mean no level target is enforced for this point.
    if (target_level >= SEQ_LEVELS) continue;

    assert(is_valid_seq_level_idx(target_level));

    // Per-frame budget implied by the level's maximum bitrate.
    const double level_bitrate_limit = av1_get_max_bitrate_for_level(
        target_level, seq_params->tier[0], seq_params->profile);
    const int target_bits_per_frame =
        (int)(level_bitrate_limit / cpi->framerate);
    if (frame_type == 0) {
      // Maximum bits for keyframe is 8 times the target_bits_per_frame.
      const int level_enforced_max_kf_bits = target_bits_per_frame * 8;
      if (bits_assigned > level_enforced_max_kf_bits) {
        // Recompute boost from the capped budget, then re-derive the bit
        // allocation from that boost so the pair stays consistent.
        const int frames = rc->frames_to_key - 1;
        p_rc->kf_boost = calculate_boost_factor(
            frames, level_enforced_max_kf_bits, group_bits);
        bits_assigned =
            calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
      }
    } else if (frame_type == 1) {
      // Maximum bits for arf is 4 times the target_bits_per_frame.
      const int level_enforced_max_arf_bits = target_bits_per_frame * 4;
      if (bits_assigned > level_enforced_max_arf_bits) {
        p_rc->gfu_boost =
            calculate_boost_factor(p_rc->baseline_gf_interval,
                                   level_enforced_max_arf_bits, group_bits);
        bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
                                             p_rc->gfu_boost, group_bits);
      }
    } else {
      assert(0);
    }
  }

  return bits_assigned;
}
843
844 // Allocate bits to each frame in a GF / ARF group
845 double layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0, 0.70, 0.55, 0.60,
846 0.60, 1.0, 1.0 };
// Distribute the GF group's total bit budget across its frames: each frame
// gets an equal base share of (gf_group_bits - gf_arf_bits), and ARF /
// internal-ARF frames additionally receive a per-layer share of gf_arf_bits.
static void allocate_gf_group_bits(GF_GROUP *gf_group,
                                   PRIMARY_RATE_CONTROL *const p_rc,
                                   RATE_CONTROL *const rc,
                                   int64_t gf_group_bits, int gf_arf_bits,
                                   int key_frame, int use_arf) {
  int64_t total_group_bits = gf_group_bits;
  int base_frame_bits;
  const int gf_group_size = gf_group->size;
  int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };

  // For key frames the frame target rate is already set and it
  // is also the golden frame.
  // === [frame_index == 0] ===
  int frame_index = !!key_frame;

  // Subtract the extra bits set aside for ARF frames from the Group Total
  if (use_arf) total_group_bits -= gf_arf_bits;

  int num_frames =
      AOMMAX(1, p_rc->baseline_gf_interval - (rc->frames_since_key == 0));
  base_frame_bits = (int)(total_group_bits / num_frames);

  // Check the number of frames in each layer in case we have a
  // non standard group length.
  int max_arf_layer = gf_group->max_layer_depth - 1;
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    if ((gf_group->update_type[idx] == ARF_UPDATE) ||
        (gf_group->update_type[idx] == INTNL_ARF_UPDATE)) {
      layer_frames[gf_group->layer_depth[idx]]++;
    }
  }

  // Allocate extra bits to each ARF layer. Note gf_arf_bits shrinks each
  // iteration, so layer_fraction applies to the bits still unassigned, not
  // to the original total; the deepest layer takes everything that remains.
  int i;
  int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 };
  for (i = 1; i <= max_arf_layer; ++i) {
    double fraction = (i == max_arf_layer) ? 1.0 : layer_fraction[i];
    layer_extra_bits[i] =
        (int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[i]));
    gf_arf_bits -= (int)(gf_arf_bits * fraction);
  }

  // Now combine ARF layer and baseline bits to give total bits for each frame.
  int arf_extra_bits;
  for (int idx = frame_index; idx < gf_group_size; ++idx) {
    switch (gf_group->update_type[idx]) {
      case ARF_UPDATE:
      case INTNL_ARF_UPDATE:
        arf_extra_bits = layer_extra_bits[gf_group->layer_depth[idx]];
        gf_group->bit_allocation[idx] = base_frame_bits + arf_extra_bits;
        break;
      case INTNL_OVERLAY_UPDATE:
      case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break;
      default: gf_group->bit_allocation[idx] = base_frame_bits; break;
    }
  }

  // Set the frame following the current GOP to 0 bit allocation. For ARF
  // groups, this next frame will be overlay frame, which is the first frame
  // in the next GOP. For GF group, next GOP will overwrite the rate allocation.
  // Setting this frame to use 0 bit (of out the current GOP budget) will
  // simplify logics in reference frame management.
  if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH)
    gf_group->bit_allocation[gf_group_size] = 0;
}
912
913 // Returns true if KF group and GF group both are almost completely static.
is_almost_static(double gf_zero_motion,int kf_zero_motion,int is_lap_enabled)914 static INLINE int is_almost_static(double gf_zero_motion, int kf_zero_motion,
915 int is_lap_enabled) {
916 if (is_lap_enabled) {
917 /*
918 * when LAP enabled kf_zero_motion is not reliable, so use strict
919 * constraint on gf_zero_motion.
920 */
921 return (gf_zero_motion >= 0.999);
922 } else {
923 return (gf_zero_motion >= 0.995) &&
924 (kf_zero_motion >= STATIC_KF_GROUP_THRESH);
925 }
926 }
927
#define ARF_ABS_ZOOM_THRESH 4.4
// Decide whether the GF group started at cur_start should be cut at
// frame_index. Returns 1 to cut, 0 to keep extending. Cut triggers:
// (a) a transition to a still section, (b) motion breakout after the minimum
// interval, (c) the maximum interval exceeded on non-static content.
static INLINE int detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start,
                                int flash_detected, int active_max_gf_interval,
                                int active_min_gf_interval,
                                GF_GROUP_STATS *gf_stats) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  InitialDimensions *const initial_dimensions = &cpi->initial_dimensions;
  // Motion breakout threshold for loop below depends on image size.
  const double mv_ratio_accumulator_thresh =
      (initial_dimensions->height + initial_dimensions->width) / 4.0;

  if (!flash_detected) {
    // Break clause to detect very still sections after motion. For example,
    // a static image after a fade or other transition.

    // TODO(angiebird): This is a temporary change, we will avoid using
    // twopass_frame.stats_in in the follow-up CL
    int index = (int)(cpi->twopass_frame.stats_in -
                      twopass->stats_buf_ctx->stats_in_start);
    if (detect_transition_to_still(&twopass->firstpass_info, index,
                                   rc->min_gf_interval, frame_index - cur_start,
                                   5, gf_stats->loop_decay_rate,
                                   gf_stats->last_loop_decay_rate)) {
      return 1;
    }
  }

  // Some conditions to breakout after min interval.
  if (frame_index - cur_start >= active_min_gf_interval &&
      // If possible don't break very close to a kf
      (rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
      // Only cut at an odd offset from the group start.
      ((frame_index - cur_start) & 0x01) && !flash_detected &&
      (gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
       gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) {
    return 1;
  }

  // If almost totally static, we will not use the max GF length later,
  // so we can continue for more frames.
  if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
      !is_almost_static(gf_stats->zero_motion_accumulator,
                        twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) {
    return 1;
  }
  return 0;
}
975
// Decide, using TPL stats (optionally gathered after temporally filtering
// the ARF), whether a shorter GF interval would be preferable.
// Returns 1 if the GF interval should be shortened, 0 otherwise.
static int is_shorter_gf_interval_better(AV1_COMP *cpi,
                                         EncodeFrameParams *frame_params,
                                         const EncodeFrameInput *frame_input) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  int gop_length_decision_method = cpi->sf.tpl_sf.gop_length_decision_method;
  // Assigned on every path below (method 2 branch, the early-decision branch,
  // or the complete-TPL branch).
  int shorten_gf_interval;

  av1_tpl_preload_rc_estimate(cpi, frame_params);

  if (gop_length_decision_method == 2) {
    // GF group length is decided based on GF boost and tpl stats of ARFs from
    // base layer, (base+1) layer.
    shorten_gf_interval =
        (p_rc->gfu_boost <
         p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
        !av1_tpl_setup_stats(cpi, 3, frame_params, frame_input);
  } else {
    int do_complete_tpl = 1;
    GF_GROUP *const gf_group = &cpi->ppi->gf_group;
    int is_temporal_filter_enabled =
        (rc->frames_since_key > 0 && gf_group->arf_index > -1);

    // Temporally filter the ARF before gathering TPL stats so the stats
    // reflect the frame that would actually be encoded.
    if (is_temporal_filter_enabled) {
      int arf_src_index = gf_group->arf_src_offset[gf_group->arf_index];
      FRAME_UPDATE_TYPE arf_update_type =
          gf_group->update_type[gf_group->arf_index];
      int is_forward_keyframe = 0;
      av1_temporal_filter(cpi, arf_src_index, arf_update_type,
                          is_forward_keyframe, NULL);
      aom_extend_frame_borders(&cpi->ppi->alt_ref_buffer,
                               av1_num_planes(&cpi->common));
    }

    if (gop_length_decision_method == 1) {
      // Check if tpl stats of ARFs from base layer, (base+1) layer,
      // (base+2) layer can decide the GF group length.
      int gop_length_eval =
          av1_tpl_setup_stats(cpi, 2, frame_params, frame_input);

      // A return value of 2 means "inconclusive": fall through to the
      // complete TPL pass below.
      if (gop_length_eval != 2) {
        do_complete_tpl = 0;
        shorten_gf_interval = !gop_length_eval;
      }
    }

    if (do_complete_tpl) {
      // Decide GF group length based on complete tpl stats.
      shorten_gf_interval =
          !av1_tpl_setup_stats(cpi, 1, frame_params, frame_input);
      // Tpl stats is reused when the ARF is temporally filtered and GF
      // interval is not shortened.
      if (is_temporal_filter_enabled && !shorten_gf_interval) {
        cpi->skip_tpl_setup_stats = 1;
#if CONFIG_BITRATE_ACCURACY
        av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data,
                                       gf_group, cpi->gf_frame_index,
                                       cpi->common.seq_params->bit_depth);
#endif  // CONFIG_BITRATE_ACCURACY
      }
    }
  }
  return shorten_gf_interval;
}
1040
#define MIN_SHRINK_LEN 6  // the minimum length of gf if we are shrinking
#define SMOOTH_FILT_LEN 7
#define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2)
#define WINDOW_SIZE 7
#define HALF_WIN (WINDOW_SIZE / 2)
// A 7-tap gaussian smooth filter.
// Made static: this read-only table is used only in this file; it previously
// had external linkage, needlessly exporting the symbol.
static const double smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242,
                                                     0.383, 0.242, 0.061,
                                                     0.006 };
1049
1050 // Smooth filter intra_error and coded_error in firstpass stats.
1051 // If stats[i].is_flash==1, the ith element should not be used in the filtering.
smooth_filter_stats(const FIRSTPASS_STATS * stats,int start_idx,int last_idx,double * filt_intra_err,double * filt_coded_err)1052 static void smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx,
1053 int last_idx, double *filt_intra_err,
1054 double *filt_coded_err) {
1055 int i, j;
1056 for (i = start_idx; i <= last_idx; i++) {
1057 double total_wt = 0;
1058 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1059 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1060 if (stats[idx].is_flash) continue;
1061
1062 filt_intra_err[i] +=
1063 smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error;
1064 total_wt += smooth_filt[j + HALF_FILT_LEN];
1065 }
1066 if (total_wt > 0.01) {
1067 filt_intra_err[i] /= total_wt;
1068 } else {
1069 filt_intra_err[i] = stats[i].intra_error;
1070 }
1071 }
1072 for (i = start_idx; i <= last_idx; i++) {
1073 double total_wt = 0;
1074 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1075 int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1076 // Coded error involves idx and idx - 1.
1077 if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1078
1079 filt_coded_err[i] +=
1080 smooth_filt[j + HALF_FILT_LEN] * stats[idx].coded_error;
1081 total_wt += smooth_filt[j + HALF_FILT_LEN];
1082 }
1083 if (total_wt > 0.01) {
1084 filt_coded_err[i] /= total_wt;
1085 } else {
1086 filt_coded_err[i] = stats[i].coded_error;
1087 }
1088 }
1089 }
1090
// Calculate gradient of `values` over [start, last]: central difference in
// the interior, one-sided difference at the endpoints.
static void get_gradient(const double *values, int start, int last,
                         double *grad) {
  // A single-point range has zero gradient by definition.
  if (start == last) {
    grad[start] = 0;
    return;
  }
  for (int i = start; i <= last; i++) {
    const int prev = (i - 1 > start) ? i - 1 : start;
    const int next = (i + 1 < last) ? i + 1 : last;
    grad[i] = (values[next] - values[prev]) / (next - prev);
  }
}
1104
find_next_scenecut(const FIRSTPASS_STATS * const stats_start,int first,int last)1105 static int find_next_scenecut(const FIRSTPASS_STATS *const stats_start,
1106 int first, int last) {
1107 // Identify unstable areas caused by scenecuts.
1108 // Find the max and 2nd max coded error, and the average of the rest frames.
1109 // If there is only one frame that yields a huge coded error, it is likely a
1110 // scenecut.
1111 double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded,
1112 max_next_coded;
1113
1114 if (last - first == 0) return -1;
1115
1116 for (int i = first; i <= last; i++) {
1117 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1118 continue;
1119 double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01);
1120 this_ratio = stats_start[i].coded_error / temp_intra;
1121 // find the avg ratio in the preceding neighborhood
1122 max_prev_ratio = 0;
1123 max_prev_coded = 0;
1124 for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) {
1125 if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash))
1126 continue;
1127 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1128 double temp_ratio = stats_start[j].coded_error / temp_intra;
1129 if (temp_ratio > max_prev_ratio) {
1130 max_prev_ratio = temp_ratio;
1131 }
1132 if (stats_start[j].coded_error > max_prev_coded) {
1133 max_prev_coded = stats_start[j].coded_error;
1134 }
1135 }
1136 // find the avg ratio in the following neighborhood
1137 max_next_ratio = 0;
1138 max_next_coded = 0;
1139 for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) {
1140 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1141 continue;
1142 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1143 double temp_ratio = stats_start[j].coded_error / temp_intra;
1144 if (temp_ratio > max_next_ratio) {
1145 max_next_ratio = temp_ratio;
1146 }
1147 if (stats_start[j].coded_error > max_next_coded) {
1148 max_next_coded = stats_start[j].coded_error;
1149 }
1150 }
1151
1152 if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) {
1153 // the ratios are very small, only check a small fixed threshold
1154 if (this_ratio < 0.02) continue;
1155 } else {
1156 // check if this frame has a larger ratio than the neighborhood
1157 double max_sr = stats_start[i].sr_coded_error;
1158 if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error);
1159 double max_sr_fr_ratio =
1160 max_sr / AOMMAX(stats_start[i].coded_error, 0.01);
1161
1162 if (max_sr_fr_ratio > 1.2) continue;
1163 if (this_ratio < 2 * AOMMAX(max_prev_ratio, max_next_ratio) &&
1164 stats_start[i].coded_error <
1165 2 * AOMMAX(max_prev_coded, max_next_coded)) {
1166 continue;
1167 }
1168 }
1169 return i;
1170 }
1171 return -1;
1172 }
1173
// Remove the region with index next_region.
// parameter merge: 0: merge with previous; 1: merge with next; 2:
// merge with both, take type from previous if possible
// After removing, next_region will be the index of the next region.
static void remove_region(int merge, REGIONS *regions, int *num_regions,
                          int *next_region) {
  int k = *next_region;
  assert(k < *num_regions);
  if (*num_regions == 1) {
    *num_regions = 0;
    return;
  }
  // At the array boundaries only one merge direction exists; override the
  // requested merge mode accordingly.
  if (k == 0) {
    merge = 1;
  } else if (k == *num_regions - 1) {
    merge = 0;
  }
  // merge == 2 removes two entries (region k and k + 1), otherwise one.
  int num_merge = (merge == 2) ? 2 : 1;
  switch (merge) {
    case 0:
      // Absorb region k into its predecessor.
      regions[k - 1].last = regions[k].last;
      *next_region = k;
      break;
    case 1:
      // Absorb region k into its successor.
      regions[k + 1].start = regions[k].start;
      *next_region = k + 1;
      break;
    case 2:
      // Absorb regions k and k + 1 into the predecessor (keeps its type).
      regions[k - 1].last = regions[k + 1].last;
      *next_region = k;
      break;
    default: assert(0);
  }
  *num_regions -= num_merge;
  // Shift the tail of the array down over the removed entries. For merge == 1
  // the copy starts one slot earlier so the widened successor region itself
  // moves into the freed slot.
  for (k = *next_region - (merge == 1); k < *num_regions; k++) {
    regions[k] = regions[k + num_merge];
  }
}
1212
// Insert a region in the cur_region_idx. The start and last should both be in
// the current region. After insertion, the cur_region_idx will point to the
// last region that was splitted from the original region.
static void insert_region(int start, int last, REGION_TYPES type,
                          REGIONS *regions, int *num_regions,
                          int *cur_region_idx) {
  int k = *cur_region_idx;
  // Remember the original region's type and end so the remainders can be
  // reconstructed after the in-place edits below.
  REGION_TYPES this_region_type = regions[k].type;
  int this_region_last = regions[k].last;
  // One extra region is needed for each side of [start, last] that does not
  // coincide with the original region's boundary.
  int num_add = (start != regions[k].start) + (last != regions[k].last);
  // move the following regions further to the back
  for (int r = *num_regions - 1; r > k; r--) {
    regions[r + num_add] = regions[r];
  }
  *num_regions += num_add;
  // Left remainder (keeps the original type) ends just before `start`.
  if (start > regions[k].start) {
    regions[k].last = start - 1;
    k++;
    regions[k].start = start;
  }
  regions[k].type = type;
  if (last < this_region_last) {
    // Right remainder restores the original type over (last, this_region_last].
    regions[k].last = last;
    k++;
    regions[k].start = last + 1;
    regions[k].last = this_region_last;
    regions[k].type = this_region_type;
  } else {
    regions[k].last = this_region_last;
  }
  *cur_region_idx = k;
}
1245
1246 // Get the average of stats inside a region.
analyze_region(const FIRSTPASS_STATS * stats,int k,REGIONS * regions)1247 static void analyze_region(const FIRSTPASS_STATS *stats, int k,
1248 REGIONS *regions) {
1249 int i;
1250 regions[k].avg_cor_coeff = 0;
1251 regions[k].avg_sr_fr_ratio = 0;
1252 regions[k].avg_intra_err = 0;
1253 regions[k].avg_coded_err = 0;
1254
1255 int check_first_sr = (k != 0);
1256
1257 for (i = regions[k].start; i <= regions[k].last; i++) {
1258 if (i > regions[k].start || check_first_sr) {
1259 double num_frames =
1260 (double)(regions[k].last - regions[k].start + check_first_sr);
1261 double max_coded_error =
1262 AOMMAX(stats[i].coded_error, stats[i - 1].coded_error);
1263 double this_ratio =
1264 stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001);
1265 regions[k].avg_sr_fr_ratio += this_ratio / num_frames;
1266 }
1267
1268 regions[k].avg_intra_err +=
1269 stats[i].intra_error / (double)(regions[k].last - regions[k].start + 1);
1270 regions[k].avg_coded_err +=
1271 stats[i].coded_error / (double)(regions[k].last - regions[k].start + 1);
1272
1273 regions[k].avg_cor_coeff +=
1274 AOMMAX(stats[i].cor_coeff, 0.001) /
1275 (double)(regions[k].last - regions[k].start + 1);
1276 regions[k].avg_noise_var +=
1277 AOMMAX(stats[i].noise_var, 0.001) /
1278 (double)(regions[k].last - regions[k].start + 1);
1279 }
1280 }
1281
1282 // Calculate the regions stats of every region.
get_region_stats(const FIRSTPASS_STATS * stats,REGIONS * regions,int num_regions)1283 static void get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions,
1284 int num_regions) {
1285 for (int k = 0; k < num_regions; k++) {
1286 analyze_region(stats, k, regions);
1287 }
1288 }
1289
// Find tentative stable regions: classify each frame as STABLE_REGION or
// HIGH_VAR_REGION from windowed statistics of the first-pass errors, opening
// a new region whenever the classification changes.
// Returns the number of regions written into `regions`.
static int find_stable_regions(const FIRSTPASS_STATS *stats,
                               const double *grad_coded, int this_start,
                               int this_last, REGIONS *regions) {
  int i, j, k = 0;
  regions[k].start = this_start;
  for (i = this_start; i <= this_last; i++) {
    // Check mean and variance of stats in a window
    double mean_intra = 0.001, var_intra = 0.001;
    double mean_coded = 0.001, var_coded = 0.001;
    int count = 0;
    for (j = -HALF_WIN; j <= HALF_WIN; j++) {
      // Clamp the window position into [this_start, this_last].
      int idx = AOMMIN(AOMMAX(i + j, this_start), this_last);
      // Skip flash frames; coded error also involves frame idx - 1.
      if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
      mean_intra += stats[idx].intra_error;
      var_intra += stats[idx].intra_error * stats[idx].intra_error;
      mean_coded += stats[idx].coded_error;
      var_coded += stats[idx].coded_error * stats[idx].coded_error;
      count++;
    }

    REGION_TYPES cur_type;
    if (count > 0) {
      // Note: var_* here are mean squares (sum of squares / count), not
      // central variances; the thresholds just above 1.0 below account for
      // that (E[x^2]/E[x]^2 == 1 for a constant signal).
      mean_intra /= (double)count;
      var_intra /= (double)count;
      mean_coded /= (double)count;
      var_coded /= (double)count;
      int is_intra_stable = (var_intra / (mean_intra * mean_intra) < 1.03);
      // Coded error is "stable" when its spread and gradient are small, or
      // when it is negligible relative to the intra error.
      int is_coded_stable = (var_coded / (mean_coded * mean_coded) < 1.04 &&
                             fabs(grad_coded[i]) / mean_coded < 0.05) ||
                            mean_coded / mean_intra < 0.05;
      int is_coded_small = mean_coded < 0.5 * mean_intra;
      cur_type = (is_intra_stable && is_coded_stable && is_coded_small)
                     ? STABLE_REGION
                     : HIGH_VAR_REGION;
    } else {
      // Every window tap fell on a flash frame: treat as high variance.
      cur_type = HIGH_VAR_REGION;
    }

    // mark a new region if type changes
    if (i == regions[k].start) {
      // first frame in the region
      regions[k].type = cur_type;
    } else if (cur_type != regions[k].type) {
      // Append a new region
      regions[k].last = i - 1;
      regions[k + 1].start = i;
      regions[k + 1].type = cur_type;
      k++;
    }
  }
  regions[k].last = this_last;
  return k + 1;
}
1344
1345 // Clean up regions that should be removed or merged.
cleanup_regions(REGIONS * regions,int * num_regions)1346 static void cleanup_regions(REGIONS *regions, int *num_regions) {
1347 int k = 0;
1348 while (k < *num_regions) {
1349 if ((k > 0 && regions[k - 1].type == regions[k].type &&
1350 regions[k].type != SCENECUT_REGION) ||
1351 regions[k].last < regions[k].start) {
1352 remove_region(0, regions, num_regions, &k);
1353 } else {
1354 k++;
1355 }
1356 }
1357 }
1358
1359 // Remove regions that are of type and shorter than length.
1360 // Merge it with its neighboring regions.
remove_short_regions(REGIONS * regions,int * num_regions,REGION_TYPES type,int length)1361 static void remove_short_regions(REGIONS *regions, int *num_regions,
1362 REGION_TYPES type, int length) {
1363 int k = 0;
1364 while (k < *num_regions && (*num_regions) > 1) {
1365 if ((regions[k].last - regions[k].start + 1 < length &&
1366 regions[k].type == type)) {
1367 // merge current region with the previous and next regions
1368 remove_region(2, regions, num_regions, &k);
1369 } else {
1370 k++;
1371 }
1372 }
1373 cleanup_regions(regions, num_regions);
1374 }
1375
// Refine the boundaries between stable and unstable regions, then merge
// short regions whose statistics contradict their classification.
static void adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
                                          REGIONS *regions, int *num_regions) {
  int i, j, k;
  // Remove regions that are too short. Likely noise.
  remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);

  get_region_stats(stats, regions, *num_regions);

  // Adjust region boundaries. The thresholds are empirically obtained, but
  // overall the performance is not very sensitive to small changes to them.
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type == STABLE_REGION) continue;
    if (k > 0) {
      // Adjust previous boundary.
      // First find the average intra/coded error in the previous
      // neighborhood.
      double avg_intra_err = 0;
      const int starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
                                regions[k - 1].start + 1);
      const int lasti = regions[k - 1].last;
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // NOTE(review): count_grad is initialized and tested but never
        // modified in this loop -- the gradient criterion appears to be
        // unused here; confirm intent.
        int count_coded = 0, count_grad = 0;
        // Walk forward from the old boundary, moving frames that still look
        // like the previous stable region into it; stop at the first frame
        // that clearly does not belong.
        for (j = lasti + 1; j <= regions[k].last; j++) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small = stats[j].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the previous stable region
            regions[k - 1].last = j;
            regions[k].start = j + 1;
          } else {
            break;
          }
        }
      }
    }  // if k > 0
    if (k < *num_regions - 1) {
      // Adjust next boundary.
      // First find the average intra/coded error in the next neighborhood.
      double avg_intra_err = 0;
      const int starti = regions[k + 1].start;
      const int lasti = AOMMIN(regions[k + 1].last - 1,
                               regions[k + 1].start + WINDOW_SIZE - 1);
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // At the boundary, coded error is large, but still the frame is stable
        int count_coded = 1, count_grad = 1;
        // Walk backward from the old boundary, moving frames that still look
        // like the next stable region into it.
        for (j = starti - 1; j >= regions[k].start; j--) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small =
              stats[j + 1].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the next stable region
            regions[k + 1].start = j;
            regions[k].last = j - 1;
          } else {
            break;
          }
        }
      }
    }  // if k < *num_regions - 1
  }  // end of loop over all regions

  cleanup_regions(regions, num_regions);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
  get_region_stats(stats, regions, *num_regions);

  // If a stable regions has higher error than neighboring high var regions,
  // or if the stable region has a lower average correlation,
  // then it should be merged with them
  k = 0;
  while (k < *num_regions && (*num_regions) > 1) {
    // Short stable region that looks worse than both neighbors: merge.
    if (regions[k].type == STABLE_REGION &&
        (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
        ((k > 0 &&  // previous regions
          (regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
         (k < *num_regions - 1 &&  // next region
          (regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else if (regions[k].type == HIGH_VAR_REGION &&
               (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
               ((k > 0 &&  // previous regions
                 (regions[k].avg_coded_err <
                      regions[k - 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k - 1].avg_cor_coeff * 1.001)) &&
                (k < *num_regions - 1 &&  // next region
                 (regions[k].avg_coded_err <
                      regions[k + 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k + 1].avg_cor_coeff * 1.001)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else {
      k++;
    }
  }

  remove_short_regions(regions, num_regions, STABLE_REGION, WINDOW_SIZE);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
}
1499
// Identify blending regions: stretches of frames whose intra error changes
// consistently in one direction (typical of fades/blends), found inside the
// previously-marked unstable regions, and re-label them BLENDING_REGION.
// NOTE(review): assumes get_region_stats() has already populated
// avg_sr_fr_ratio / avg_cor_coeff on `regions` — confirm against callers.
static void find_blending_regions(const FIRSTPASS_STATS *stats,
                                  REGIONS *regions, int *num_regions) {
  int i, k = 0;
  // Blending regions will have large content change, therefore will have a
  // large consistent change in intra error.
  int count_stable = 0;
  // Pass 1: within each non-stable region, mark every run of frames with a
  // consistent large one-directional intra-error change as BLENDING_REGION.
  while (k < *num_regions) {
    if (regions[k].type == STABLE_REGION) {
      k++;
      count_stable++;
      continue;
    }
    int dir = 0;  // Sign of current intra-error trend: +1, -1, or 0 (none).
    int start = 0, last;
    for (i = regions[k].start; i <= regions[k].last; i++) {
      // First mark the regions that has consistent large change of intra error.
      if (k == 0 && i == regions[k].start) continue;
      // Flash frames give unreliable gradients; skip them and their successor.
      if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue;
      double grad = stats[i].intra_error - stats[i - 1].intra_error;
      // "Large" means more than 5% relative to this frame's intra error.
      int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05;
      int this_dir = 0;
      if (large_change) {
        this_dir = (grad > 0) ? 1 : -1;
      }
      // the current trend continues
      if (dir == this_dir) continue;
      if (dir != 0) {
        // Mark the end of a new large change group and add it
        last = i - 1;
        insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
      }
      dir = this_dir;
      if (k == 0 && i == regions[k].start + 1) {
        start = i - 1;
      } else {
        start = i;
      }
    }
    // Close out a trend that runs through the end of the region.
    if (dir != 0) {
      last = regions[k].last;
      insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
    }
    k++;
  }

  // If the blending region has very low correlation, mark it as high variance
  // since we probably cannot benefit from it anyways.
  get_region_stats(stats, regions, *num_regions);
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type != BLENDING_REGION) continue;
    // Disqualify: single-frame regions, low average correlation, or no stable
    // region anywhere in the list.
    if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
        count_stable == 0)
      regions[k].type = HIGH_VAR_REGION;
  }
  get_region_stats(stats, regions, *num_regions);

  // It is possible for blending to result in a "dip" in intra error (first
  // decrease then increase). Therefore we need to find the dip and combine the
  // two regions.
  k = 1;
  while (k < *num_regions) {
    if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) {
      // Check if this short high variance regions is actually in the middle of
      // a blending region.
      if (regions[k - 1].type == BLENDING_REGION &&
          regions[k + 1].type == BLENDING_REGION &&
          regions[k].last - regions[k].start < 3) {
        // Direction of intra-error change at the end of each neighbor.
        int prev_dir = (stats[regions[k - 1].last].intra_error -
                        stats[regions[k - 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        int next_dir = (stats[regions[k + 1].last].intra_error -
                        stats[regions[k + 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        if (prev_dir < 0 && next_dir > 0) {
          // This is possibly a mid region of blending. Check the ratios
          double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
                                      regions[k + 1].avg_sr_fr_ratio) *
                               0.95;
          if (regions[k].avg_sr_fr_ratio > ratio_thres) {
            regions[k].type = BLENDING_REGION;
            // Merge regions k-1, k and k+1 into a single blending region.
            remove_region(2, regions, num_regions, &k);
            analyze_region(stats, k - 1, regions);
            continue;
          }
        }
      }
    }
    // Check if we have a pair of consecutive blending regions.
    if (regions[k - 1].type == BLENDING_REGION &&
        regions[k].type == BLENDING_REGION) {
      int prev_dir = (stats[regions[k - 1].last].intra_error -
                      stats[regions[k - 1].last - 1].intra_error) > 0
                         ? 1
                         : -1;
      int next_dir = (stats[regions[k].last].intra_error -
                      stats[regions[k].last - 1].intra_error) > 0
                         ? 1
                         : -1;

      // if both are too short, no need to check
      int total_length = regions[k].last - regions[k - 1].start + 1;
      if (total_length < 4) {
        regions[k - 1].type = HIGH_VAR_REGION;
        k++;
        continue;
      }

      int to_merge = 0;
      if (prev_dir < 0 && next_dir > 0) {
        // In this case we check the last frame in the previous region.
        double prev_length =
            (double)(regions[k - 1].last - regions[k - 1].start + 1);
        double last_ratio, ratio_thres;
        if (prev_length < 2.01) {
          // if the previous region is very short
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          ratio_thres = regions[k].avg_sr_fr_ratio * 0.95;
        } else {
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          // Average ratio of the previous region excluding its last frame.
          double prev_ratio =
              (regions[k - 1].avg_sr_fr_ratio * prev_length - last_ratio) /
              (prev_length - 1.0);
          ratio_thres = AOMMIN(prev_ratio, regions[k].avg_sr_fr_ratio) * 0.95;
        }
        if (last_ratio > ratio_thres) {
          to_merge = 1;
        }
      }

      if (to_merge) {
        remove_region(0, regions, num_regions, &k);
        analyze_region(stats, k - 1, regions);
        continue;
      } else {
        // These are possibly two separate blending regions. Mark the boundary
        // frame as HIGH_VAR_REGION to separate the two.
        int prev_k = k - 1;
        insert_region(regions[prev_k].last, regions[prev_k].last,
                      HIGH_VAR_REGION, regions, num_regions, &prev_k);
        analyze_region(stats, prev_k, regions);
        k = prev_k + 1;
        analyze_region(stats, k, regions);
      }
    }
    k++;
  }
  cleanup_regions(regions, num_regions);
}
1659
1660 // Clean up decision for blendings. Remove blending regions that are too short.
1661 // Also if a very short high var region is between a blending and a stable
1662 // region, just merge it with one of them.
cleanup_blendings(REGIONS * regions,int * num_regions)1663 static void cleanup_blendings(REGIONS *regions, int *num_regions) {
1664 int k = 0;
1665 while (k<*num_regions && * num_regions> 1) {
1666 int is_short_blending = regions[k].type == BLENDING_REGION &&
1667 regions[k].last - regions[k].start + 1 < 5;
1668 int is_short_hv = regions[k].type == HIGH_VAR_REGION &&
1669 regions[k].last - regions[k].start + 1 < 5;
1670 int has_stable_neighbor =
1671 ((k > 0 && regions[k - 1].type == STABLE_REGION) ||
1672 (k < *num_regions - 1 && regions[k + 1].type == STABLE_REGION));
1673 int has_blend_neighbor =
1674 ((k > 0 && regions[k - 1].type == BLENDING_REGION) ||
1675 (k < *num_regions - 1 && regions[k + 1].type == BLENDING_REGION));
1676 int total_neighbors = (k > 0) + (k < *num_regions - 1);
1677
1678 if (is_short_blending ||
1679 (is_short_hv &&
1680 has_stable_neighbor + has_blend_neighbor >= total_neighbors)) {
1681 // Remove this region.Try to determine whether to combine it with the
1682 // previous or next region.
1683 int merge;
1684 double prev_diff =
1685 (k > 0)
1686 ? fabs(regions[k].avg_cor_coeff - regions[k - 1].avg_cor_coeff)
1687 : 1;
1688 double next_diff =
1689 (k < *num_regions - 1)
1690 ? fabs(regions[k].avg_cor_coeff - regions[k + 1].avg_cor_coeff)
1691 : 1;
1692 // merge == 0 means to merge with previous, 1 means to merge with next
1693 merge = prev_diff > next_diff;
1694 remove_region(merge, regions, num_regions, &k);
1695 } else {
1696 k++;
1697 }
1698 }
1699 cleanup_regions(regions, num_regions);
1700 }
1701
// Identify stable and unstable regions from first pass stats.
// Stats_start points to the first frame to analyze.
// Offset is the offset from the current frame to the frame stats_start is
// pointing to.
// The resulting regions (stable / high-variance / blending / scenecut) are
// written to `regions`, with their count in `total_regions`, and their frame
// indices shifted by `offset` so they are relative to the current frame.
static void identify_regions(const FIRSTPASS_STATS *const stats_start,
                             int total_frames, int offset, REGIONS *regions,
                             int *total_regions) {
  int k;
  if (total_frames <= 1) return;

  // store the initial decisions
  REGIONS temp_regions[MAX_FIRSTPASS_ANALYSIS_FRAMES];
  av1_zero_array(temp_regions, MAX_FIRSTPASS_ANALYSIS_FRAMES);
  // buffers for filtered stats
  double filt_intra_err[MAX_FIRSTPASS_ANALYSIS_FRAMES] = { 0 };
  double filt_coded_err[MAX_FIRSTPASS_ANALYSIS_FRAMES] = { 0 };
  double grad_coded[MAX_FIRSTPASS_ANALYSIS_FRAMES] = { 0 };

  int cur_region = 0, this_start = 0, this_last;

  // Process the frames one scenecut-delimited segment at a time.
  int next_scenecut = -1;
  do {
    // first get the obvious scenecuts
    next_scenecut =
        find_next_scenecut(stats_start, this_start, total_frames - 1);
    // Analyze up to (but excluding) the scenecut, or to the last frame.
    this_last = (next_scenecut >= 0) ? (next_scenecut - 1) : total_frames - 1;

    // low-pass filter the needed stats
    smooth_filter_stats(stats_start, this_start, this_last, filt_intra_err,
                        filt_coded_err);
    get_gradient(filt_coded_err, this_start, this_last, grad_coded);

    // find tentative stable regions and unstable regions
    int num_regions = find_stable_regions(stats_start, grad_coded, this_start,
                                          this_last, temp_regions);

    adjust_unstable_region_bounds(stats_start, temp_regions, &num_regions);

    get_region_stats(stats_start, temp_regions, num_regions);

    // Try to identify blending regions in the unstable regions
    find_blending_regions(stats_start, temp_regions, &num_regions);
    cleanup_blendings(temp_regions, &num_regions);

    // The flash points should all be considered high variance points
    k = 0;
    while (k < num_regions) {
      if (temp_regions[k].type != STABLE_REGION) {
        k++;
        continue;
      }
      int start = temp_regions[k].start;
      int last = temp_regions[k].last;
      // Carve single-frame HIGH_VAR regions out of stable regions at flashes.
      for (int i = start; i <= last; i++) {
        if (stats_start[i].is_flash) {
          insert_region(i, i, HIGH_VAR_REGION, temp_regions, &num_regions, &k);
        }
      }
      k++;
    }
    cleanup_regions(temp_regions, &num_regions);

    // copy the regions in the scenecut group
    for (k = 0; k < num_regions; k++) {
      // Drop a trailing empty region (last < start) if it is the final one.
      if (temp_regions[k].last < temp_regions[k].start &&
          k == num_regions - 1) {
        num_regions--;
        break;
      }
      regions[k + cur_region] = temp_regions[k];
    }
    cur_region += num_regions;

    // add the scenecut region
    if (next_scenecut > -1) {
      // add the scenecut region, and find the next scenecut
      regions[cur_region].type = SCENECUT_REGION;
      regions[cur_region].start = next_scenecut;
      regions[cur_region].last = next_scenecut;
      cur_region++;
      this_start = next_scenecut + 1;
    }
  } while (next_scenecut >= 0);

  *total_regions = cur_region;
  get_region_stats(stats_start, regions, *total_regions);

  for (k = 0; k < *total_regions; k++) {
    // If scenecuts are very minor, mark them as high variance.
    if (regions[k].type != SCENECUT_REGION ||
        regions[k].avg_cor_coeff *
                (1 - stats_start[regions[k].start].noise_var /
                         regions[k].avg_intra_err) <
            0.8) {
      continue;
    }
    regions[k].type = HIGH_VAR_REGION;
  }
  cleanup_regions(regions, total_regions);
  get_region_stats(stats_start, regions, *total_regions);

  // Convert indices from stats_start-relative to current-frame-relative.
  for (k = 0; k < *total_regions; k++) {
    regions[k].start += offset;
    regions[k].last += offset;
  }
}
1808
find_regions_index(const REGIONS * regions,int num_regions,int frame_idx)1809 static int find_regions_index(const REGIONS *regions, int num_regions,
1810 int frame_idx) {
1811 for (int k = 0; k < num_regions; k++) {
1812 if (regions[k].start <= frame_idx && regions[k].last >= frame_idx) {
1813 return k;
1814 }
1815 }
1816 return -1;
1817 }
1818
1819 /*!\brief Determine the length of future GF groups.
1820 *
1821 * \ingroup gf_group_algo
1822 * This function decides the gf group length of future frames in batch
1823 *
1824 * \param[in] cpi Top-level encoder structure
1825 * \param[in] max_gop_length Maximum length of the GF group
1826 * \param[in] max_intervals Maximum number of intervals to decide
1827 *
1828 * \return Nothing is returned. Instead, cpi->ppi->rc.gf_intervals is
1829 * changed to store the decided GF group lengths.
1830 */
static void calculate_gf_length(AV1_COMP *cpi, int max_gop_length,
                                int max_intervals) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  // When starting right at a key frame, shift back by one so index 0 maps to
  // the key frame's own stats.
  const FIRSTPASS_STATS *const stats = start_pos - (rc->frames_since_key == 0);

  const int f_w = cpi->common.width;
  const int f_h = cpi->common.height;
  int i;

  int flash_detected;

  av1_zero(next_frame);

  if (has_no_stats_stage(cpi)) {
    // No first pass stats available: use the fixed maximum interval.
    for (i = 0; i < MAX_NUM_GF_INTERVALS; i++) {
      p_rc->gf_intervals[i] = AOMMIN(rc->max_gf_interval, max_gop_length);
    }
    p_rc->cur_gf_index = 0;
    rc->intervals_till_gf_calculate_due = MAX_NUM_GF_INTERVALS;
    return;
  }

  // TODO(urvang): Try logic to vary min and max interval based on q.
  const int active_min_gf_interval = rc->min_gf_interval;
  const int active_max_gf_interval =
      AOMMIN(rc->max_gf_interval, max_gop_length);
  const int min_shrink_int = AOMMAX(MIN_SHRINK_LEN, active_min_gf_interval);

  i = (rc->frames_since_key == 0);
  max_intervals = cpi->ppi->lap_enabled ? 1 : max_intervals;
  int count_cuts = 1;
  // If cpi->gf_state.arf_gf_boost_lst is 0, we are starting with a KF or GF.
  int cur_start = -1 + !cpi->ppi->gf_state.arf_gf_boost_lst, cur_last;
  int cut_pos[MAX_NUM_GF_INTERVALS + 1] = { -1 };
  int cut_here;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  while (count_cuts < max_intervals + 1) {
    // reaches next key frame, break here
    if (i >= rc->frames_to_key) {
      cut_here = 2;
    } else if (i - cur_start >= rc->static_scene_max_gf_interval) {
      // reached maximum len, but nothing special yet (almost static)
      // let's look at the next interval
      cut_here = 1;
    } else if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) {
      // reaches last frame, break
      cut_here = 2;
    } else {
      // Test for the case where there is a brief flash but the prediction
      // quality back to an earlier frame is then restored.
      flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
      // TODO(bohanli): remove redundant accumulations here, or unify
      // this and the ones in define_gf_group
      accumulate_next_frame_stats(&next_frame, flash_detected,
                                  rc->frames_since_key, i, &gf_stats, f_w, f_h);

      cut_here = detect_gf_cut(cpi, i, cur_start, flash_detected,
                               active_max_gf_interval, active_min_gf_interval,
                               &gf_stats);
    }
    if (cut_here) {
      cur_last = i - 1;  // the current last frame in the gf group
      int ori_last = cur_last;
      // The region frame idx does not start from the same frame as cur_start
      // and cur_last. Need to offset them.
      int offset = rc->frames_since_key - p_rc->regions_offset;
      REGIONS *regions = p_rc->regions;
      int num_regions = p_rc->num_regions;

      int scenecut_idx = -1;
      // only try shrinking if interval smaller than active_max_gf_interval
      if (cur_last - cur_start <= active_max_gf_interval &&
          cur_last > cur_start) {
        // find the region indices of where the first and last frame belong.
        int k_start =
            find_regions_index(regions, num_regions, cur_start + offset);
        int k_last =
            find_regions_index(regions, num_regions, cur_last + offset);
        if (cur_start + offset == 0) k_start = 0;

        // See if we have a scenecut in between
        for (int r = k_start + 1; r <= k_last; r++) {
          if (regions[r].type == SCENECUT_REGION &&
              regions[r].last - offset - cur_start > active_min_gf_interval) {
            scenecut_idx = r;
            break;
          }
        }

        // if the found scenecut is very close to the end, ignore it.
        // The scenecut_idx != -1 guard is required: without it, when no
        // scenecut was found above, the expression would read regions[-1],
        // which is an out-of-bounds access (undefined behavior).
        if (scenecut_idx != -1 &&
            regions[num_regions - 1].last - regions[scenecut_idx].last < 4) {
          scenecut_idx = -1;
        }

        if (scenecut_idx != -1) {
          // If we have a scenecut, then stop at it.
          // TODO(bohanli): add logic here to stop before the scenecut and for
          // the next gop start from the scenecut with GF
          int is_minor_sc =
              (regions[scenecut_idx].avg_cor_coeff *
                   (1 - stats[regions[scenecut_idx].start - offset].noise_var /
                            regions[scenecut_idx].avg_intra_err) >
               0.6);
          cur_last = regions[scenecut_idx].last - offset - !is_minor_sc;
        } else {
          int is_last_analysed = (k_last == num_regions - 1) &&
                                 (cur_last + offset == regions[k_last].last);
          int not_enough_regions =
              k_last - k_start <=
              1 + (regions[k_start].type == SCENECUT_REGION);
          // if we are very close to the end, then do not shrink since it may
          // introduce intervals that are too short
          if (!(is_last_analysed && not_enough_regions)) {
            const double arf_length_factor = 0.1;
            double best_score = 0;
            int best_j = -1;
            const int first_frame = regions[0].start - offset;
            const int last_frame = regions[num_regions - 1].last - offset;
            // score of how much the arf helps the whole GOP
            double base_score = 0.0;
            // Accumulate base_score in
            for (int j = cur_start + 1; j < cur_start + min_shrink_int; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
            }
            int met_blending = 0;  // Whether we have met blending areas before
            int last_blending = 0;  // Whether the previous frame if blending
            for (int j = cur_start + min_shrink_int; j <= cur_last; j++) {
              if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
              base_score = (base_score + 1.0) * stats[j].cor_coeff;
              int this_reg =
                  find_regions_index(regions, num_regions, j + offset);
              if (this_reg < 0) continue;
              // A GOP should include at most 1 blending region.
              if (regions[this_reg].type == BLENDING_REGION) {
                last_blending = 1;
                if (met_blending) {
                  break;
                } else {
                  base_score = 0;
                  continue;
                }
              } else {
                if (last_blending) met_blending = 1;
                last_blending = 0;
              }

              // Add the factor of how good the neighborhood is for this
              // candidate arf.
              double this_score = arf_length_factor * base_score;
              double temp_accu_coeff = 1.0;
              // following frames
              int count_f = 0;
              for (int n = j + 1; n <= j + 3 && n <= last_frame; n++) {
                if (stats + n >= twopass->stats_buf_ctx->stats_in_end) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    (1 - stats[n].noise_var /
                             AOMMAX(regions[this_reg].avg_intra_err, 0.001));
                count_f++;
              }
              // preceding frames
              temp_accu_coeff = 1.0;
              for (int n = j; n > j - 3 * 2 + count_f && n > first_frame; n--) {
                if (stats + n < twopass->stats_buf_ctx->stats_in_start) break;
                temp_accu_coeff *= stats[n].cor_coeff;
                this_score +=
                    temp_accu_coeff *
                    (1 - stats[n].noise_var /
                             AOMMAX(regions[this_reg].avg_intra_err, 0.001));
              }

              if (this_score > best_score) {
                best_score = this_score;
                best_j = j;
              }
            }

            // For blending areas, move one more frame in case we missed the
            // first blending frame.
            int best_reg =
                find_regions_index(regions, num_regions, best_j + offset);
            if (best_reg < num_regions - 1 && best_reg > 0) {
              if (regions[best_reg - 1].type == BLENDING_REGION &&
                  regions[best_reg + 1].type == BLENDING_REGION) {
                if (best_j + offset == regions[best_reg].start &&
                    best_j + offset < regions[best_reg].last) {
                  best_j += 1;
                } else if (best_j + offset == regions[best_reg].last &&
                           best_j + offset > regions[best_reg].start) {
                  best_j -= 1;
                }
              }
            }

            if (cur_last - best_j < 2) best_j = cur_last;
            if (best_j > 0 && best_score > 0.1) cur_last = best_j;
            // if cannot find anything, just cut at the original place.
          }
        }
      }
      cut_pos[count_cuts] = cur_last;
      count_cuts++;

      // reset pointers to the shrinked location
      cpi->twopass_frame.stats_in = start_pos + cur_last;
      cur_start = cur_last;
      // If the next GOP would start inside a scenecut region, advance the
      // start past the scenecut frame.
      int cur_region_idx =
          find_regions_index(regions, num_regions, cur_start + 1 + offset);
      if (cur_region_idx >= 0)
        if (regions[cur_region_idx].type == SCENECUT_REGION) cur_start++;

      i = cur_last;

      if (cut_here > 1 && cur_last == ori_last) break;

      // reset accumulators
      init_gf_stats(&gf_stats);
    }
    ++i;
  }

  // save intervals
  rc->intervals_till_gf_calculate_due = count_cuts - 1;
  for (int n = 1; n < count_cuts; n++) {
    p_rc->gf_intervals[n - 1] = cut_pos[n] - cut_pos[n - 1];
  }
  p_rc->cur_gf_index = 0;
  cpi->twopass_frame.stats_in = start_pos;
}
2067
// Clamp rc.frames_to_key so it never exceeds the number of frames actually
// available: the lookahead queue depth while the queue is flushing, or the
// remaining frame count otherwise.
static void correct_frames_to_key(AV1_COMP *cpi) {
  const int queued_frames =
      (int)av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage);
  if (queued_frames <
      av1_lookahead_pop_sz(cpi->ppi->lookahead, cpi->compressor_stage)) {
    // The lookahead queue is draining at the end of the clip.
    assert(
        IMPLIES(cpi->oxcf.pass != AOM_RC_ONE_PASS && cpi->ppi->frames_left > 0,
                queued_frames == cpi->ppi->frames_left));
    cpi->rc.frames_to_key = AOMMIN(cpi->rc.frames_to_key, queued_frames);
    return;
  }
  if (cpi->ppi->frames_left > 0) {
    // Correct frames to key based on limit
    cpi->rc.frames_to_key =
        AOMMIN(cpi->rc.frames_to_key, cpi->ppi->frames_left);
  }
}
2083
2084 /*!\brief Define a GF group in one pass mode when no look ahead stats are
2085 * available.
2086 *
2087 * \ingroup gf_group_algo
2088 * This function defines the structure of a GF group, along with various
2089 * parameters regarding bit-allocation and quality setup in the special
 * case of one pass encoding where no lookahead stats are available.
2091 *
2092 * \param[in] cpi Top-level encoder structure
2093 *
2094 * \return Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2095 */
static void define_gf_group_pass0(AV1_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;

  // Pick the GF interval: cyclic refresh chooses its own golden update,
  // otherwise consume the next precomputed interval.
  if (oxcf->q_cfg.aq_mode == CYCLIC_REFRESH_AQ) {
    av1_cyclic_refresh_set_golden_update(cpi);
  } else {
    p_rc->baseline_gf_interval = p_rc->gf_intervals[p_rc->cur_gf_index];
    rc->intervals_till_gf_calculate_due--;
    p_rc->cur_gf_index++;
  }

  // correct frames_to_key when lookahead queue is flushing
  correct_frames_to_key(cpi);

  // Never let the group run past the next key frame.
  p_rc->baseline_gf_interval =
      AOMMIN(p_rc->baseline_gf_interval, rc->frames_to_key);

  p_rc->gfu_boost = DEFAULT_GF_BOOST;
  p_rc->constrained_gf_group =
      (p_rc->baseline_gf_interval >= rc->frames_to_key) ? 1 : 0;

  gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;

  // Rare case when the look-ahead is less than the target GOP length, can't
  // generate ARF frame.
  if (p_rc->baseline_gf_interval > gf_cfg->lag_in_frames ||
      !is_altref_enabled(gf_cfg->lag_in_frames, gf_cfg->enable_auto_arf) ||
      p_rc->baseline_gf_interval < rc->min_gf_interval)
    gf_group->max_layer_depth_allowed = 0;

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  // Allocate bits to each of the frames in the GF group.
  // TODO(sarahparker) Extend this to work with pyramid structure.
  for (int idx = 0; idx < gf_group->size; ++idx) {
    const FRAME_UPDATE_TYPE update_type = gf_group->update_type[idx];
    const int is_key_frame = (update_type == KF_UPDATE);
    int target;
    if (oxcf->rc_cfg.mode == AOM_CBR) {
      target = is_key_frame
                   ? av1_calc_iframe_target_size_one_pass_cbr(cpi)
                   : av1_calc_pframe_target_size_one_pass_cbr(cpi, update_type);
    } else {
      target = is_key_frame
                   ? av1_calc_iframe_target_size_one_pass_vbr(cpi)
                   : av1_calc_pframe_target_size_one_pass_vbr(cpi, update_type);
    }
    gf_group->bit_allocation[idx] = target;
  }
}
2154
// Record the length of the current GF group (the ARF position) in the
// primary rate control state.
static INLINE void set_baseline_gf_interval(PRIMARY_RATE_CONTROL *p_rc,
                                            int arf_position) {
  p_rc->baseline_gf_interval = arf_position;
}
2159
2160 // initialize GF_GROUP_STATS
init_gf_stats(GF_GROUP_STATS * gf_stats)2161 static void init_gf_stats(GF_GROUP_STATS *gf_stats) {
2162 gf_stats->gf_group_err = 0.0;
2163 gf_stats->gf_group_raw_error = 0.0;
2164 gf_stats->gf_group_skip_pct = 0.0;
2165 gf_stats->gf_group_inactive_zone_rows = 0.0;
2166
2167 gf_stats->mv_ratio_accumulator = 0.0;
2168 gf_stats->decay_accumulator = 1.0;
2169 gf_stats->zero_motion_accumulator = 1.0;
2170 gf_stats->loop_decay_rate = 1.0;
2171 gf_stats->last_loop_decay_rate = 1.0;
2172 gf_stats->this_frame_mv_in_out = 0.0;
2173 gf_stats->mv_in_out_accumulator = 0.0;
2174 gf_stats->abs_mv_in_out_accumulator = 0.0;
2175
2176 gf_stats->avg_sr_coded_error = 0.0;
2177 gf_stats->avg_pcnt_second_ref = 0.0;
2178 gf_stats->avg_new_mv_count = 0.0;
2179 gf_stats->avg_wavelet_energy = 0.0;
2180 gf_stats->avg_raw_err_stdev = 0.0;
2181 gf_stats->non_zero_stdev_count = 0;
2182 }
2183
2184 // Analyse and define a gf/arf group.
2185 #define MAX_GF_BOOST 5400
2186 /*!\brief Define a GF group.
2187 *
2188 * \ingroup gf_group_algo
2189 * This function defines the structure of a GF group, along with various
2190 * parameters regarding bit-allocation and quality setup.
2191 *
2192 * \param[in] cpi Top-level encoder structure
2193 * \param[in] frame_params Structure with frame parameters
2194 * \param[in] is_final_pass Whether this is the final pass for the
2195 * GF group, or a trial (non-zero)
2196 *
2197 * \return Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2198 */
define_gf_group(AV1_COMP * cpi,EncodeFrameParams * frame_params,int is_final_pass)2199 static void define_gf_group(AV1_COMP *cpi, EncodeFrameParams *frame_params,
2200 int is_final_pass) {
2201 AV1_COMMON *const cm = &cpi->common;
2202 RATE_CONTROL *const rc = &cpi->rc;
2203 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2204 const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2205 TWO_PASS *const twopass = &cpi->ppi->twopass;
2206 FIRSTPASS_STATS next_frame;
2207 const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
2208 GF_GROUP *gf_group = &cpi->ppi->gf_group;
2209 FRAME_INFO *frame_info = &cpi->frame_info;
2210 const GFConfig *const gf_cfg = &oxcf->gf_cfg;
2211 const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
2212 const int f_w = cm->width;
2213 const int f_h = cm->height;
2214 int i;
2215 int flash_detected;
2216 int64_t gf_group_bits;
2217 const int is_intra_only = rc->frames_since_key == 0;
2218
2219 cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);
2220
2221 // Reset the GF group data structures unless this is a key
2222 // frame in which case it will already have been done.
2223 if (!is_intra_only) {
2224 av1_zero(cpi->ppi->gf_group);
2225 cpi->gf_frame_index = 0;
2226 }
2227
2228 av1_zero(next_frame);
2229
2230 if (has_no_stats_stage(cpi)) {
2231 define_gf_group_pass0(cpi);
2232 return;
2233 }
2234
2235 // correct frames_to_key when lookahead queue is emptying
2236 if (cpi->ppi->lap_enabled) {
2237 correct_frames_to_key(cpi);
2238 }
2239
2240 GF_GROUP_STATS gf_stats;
2241 init_gf_stats(&gf_stats);
2242
2243 const int can_disable_arf = !gf_cfg->gf_min_pyr_height;
2244
2245 // If this is a key frame or the overlay from a previous arf then
2246 // the error score / cost of this frame has already been accounted for.
2247 const int active_min_gf_interval = rc->min_gf_interval;
2248
2249 i = is_intra_only;
2250 // get the determined gf group length from p_rc->gf_intervals
2251 while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
2252 // read in the next frame
2253 if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) break;
2254 // Accumulate error score of frames in this gf group.
2255 double mod_frame_err =
2256 calculate_modified_err(frame_info, twopass, oxcf, &next_frame);
2257 // accumulate stats for this frame
2258 accumulate_this_frame_stats(&next_frame, mod_frame_err, &gf_stats);
2259 ++i;
2260 }
2261
2262 reset_fpf_position(&cpi->twopass_frame, start_pos);
2263
2264 i = is_intra_only;
2265 input_stats(twopass, &cpi->twopass_frame, &next_frame);
2266 while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
2267 // read in the next frame
2268 if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) break;
2269
2270 // Test for the case where there is a brief flash but the prediction
2271 // quality back to an earlier frame is then restored.
2272 flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
2273
2274 // accumulate stats for next frame
2275 accumulate_next_frame_stats(&next_frame, flash_detected,
2276 rc->frames_since_key, i, &gf_stats, f_w, f_h);
2277
2278 ++i;
2279 }
2280
2281 i = p_rc->gf_intervals[p_rc->cur_gf_index];
2282
2283 if (is_final_pass) {
2284 rc->intervals_till_gf_calculate_due--;
2285 p_rc->cur_gf_index++;
2286 }
2287
2288 // Was the group length constrained by the requirement for a new KF?
2289 p_rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
2290
2291 average_gf_stats(i, &gf_stats);
2292
2293 // Disable internal ARFs for "still" gf groups.
2294 // zero_motion_accumulator: minimum percentage of (0,0) motion;
2295 // avg_sr_coded_error: average of the SSE per pixel of each frame;
2296 // avg_raw_err_stdev: average of the standard deviation of (0,0)
2297 // motion error per block of each frame.
2298 const int can_disable_internal_arfs = gf_cfg->gf_min_pyr_height <= 1;
2299 if (can_disable_internal_arfs &&
2300 gf_stats.zero_motion_accumulator > MIN_ZERO_MOTION &&
2301 gf_stats.avg_sr_coded_error < MAX_SR_CODED_ERROR &&
2302 gf_stats.avg_raw_err_stdev < MAX_RAW_ERR_VAR) {
2303 cpi->ppi->internal_altref_allowed = 0;
2304 }
2305
2306 int use_alt_ref;
2307 if (can_disable_arf) {
2308 use_alt_ref =
2309 !is_almost_static(gf_stats.zero_motion_accumulator,
2310 twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled) &&
2311 p_rc->use_arf_in_this_kf_group && (i < gf_cfg->lag_in_frames) &&
2312 (i >= MIN_GF_INTERVAL);
2313 } else {
2314 use_alt_ref = p_rc->use_arf_in_this_kf_group &&
2315 (i < gf_cfg->lag_in_frames) && (i > 2);
2316 }
2317
2318 #define REDUCE_GF_LENGTH_THRESH 4
2319 #define REDUCE_GF_LENGTH_TO_KEY_THRESH 9
2320 #define REDUCE_GF_LENGTH_BY 1
2321 int alt_offset = 0;
2322 // The length reduction strategy is tweaked for certain cases, and doesn't
2323 // work well for certain other cases.
2324 const int allow_gf_length_reduction =
2325 ((rc_cfg->mode == AOM_Q && rc_cfg->cq_level <= 128) ||
2326 !cpi->ppi->internal_altref_allowed) &&
2327 !is_lossless_requested(rc_cfg);
2328
2329 if (allow_gf_length_reduction && use_alt_ref) {
2330 // adjust length of this gf group if one of the following condition met
2331 // 1: only one overlay frame left and this gf is too long
2332 // 2: next gf group is too short to have arf compared to the current gf
2333
2334 // maximum length of next gf group
2335 const int next_gf_len = rc->frames_to_key - i;
2336 const int single_overlay_left =
2337 next_gf_len == 0 && i > REDUCE_GF_LENGTH_THRESH;
2338 // the next gf is probably going to have a ARF but it will be shorter than
2339 // this gf
2340 const int unbalanced_gf =
2341 i > REDUCE_GF_LENGTH_TO_KEY_THRESH &&
2342 next_gf_len + 1 < REDUCE_GF_LENGTH_TO_KEY_THRESH &&
2343 next_gf_len + 1 >= rc->min_gf_interval;
2344
2345 if (single_overlay_left || unbalanced_gf) {
2346 const int roll_back = REDUCE_GF_LENGTH_BY;
2347 // Reduce length only if active_min_gf_interval will be respected later.
2348 if (i - roll_back >= active_min_gf_interval + 1) {
2349 alt_offset = -roll_back;
2350 i -= roll_back;
2351 if (is_final_pass) rc->intervals_till_gf_calculate_due = 0;
2352 }
2353 }
2354 }
2355
2356 // Should we use the alternate reference frame.
2357 int ext_len = i - is_intra_only;
2358 if (use_alt_ref) {
2359 gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
2360 set_baseline_gf_interval(&cpi->ppi->p_rc, i);
2361
2362 const int forward_frames = (rc->frames_to_key - i >= ext_len)
2363 ? ext_len
2364 : AOMMAX(0, rc->frames_to_key - i);
2365
2366 // Calculate the boost for alt ref.
2367 p_rc->gfu_boost = av1_calc_arf_boost(
2368 twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset,
2369 forward_frames, ext_len, &p_rc->num_stats_used_for_gfu_boost,
2370 &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled);
2371 } else {
2372 reset_fpf_position(&cpi->twopass_frame, start_pos);
2373 gf_group->max_layer_depth_allowed = 0;
2374 set_baseline_gf_interval(&cpi->ppi->p_rc, i);
2375
2376 p_rc->gfu_boost = AOMMIN(
2377 MAX_GF_BOOST,
2378 av1_calc_arf_boost(
2379 twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset, ext_len,
2380 0, &p_rc->num_stats_used_for_gfu_boost,
2381 &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled));
2382 }
2383
2384 #define LAST_ALR_BOOST_FACTOR 0.2f
2385 p_rc->arf_boost_factor = 1.0;
2386 if (use_alt_ref && !is_lossless_requested(rc_cfg)) {
2387 // Reduce the boost of altref in the last gf group
2388 if (rc->frames_to_key - ext_len == REDUCE_GF_LENGTH_BY ||
2389 rc->frames_to_key - ext_len == 0) {
2390 p_rc->arf_boost_factor = LAST_ALR_BOOST_FACTOR;
2391 }
2392 }
2393
2394 rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
2395
2396 // Reset the file position.
2397 reset_fpf_position(&cpi->twopass_frame, start_pos);
2398
2399 if (cpi->ppi->lap_enabled) {
2400 // Since we don't have enough stats to know the actual error of the
2401 // gf group, we assume error of each frame to be equal to 1 and set
2402 // the error of the group as baseline_gf_interval.
2403 gf_stats.gf_group_err = p_rc->baseline_gf_interval;
2404 }
2405 // Calculate the bits to be allocated to the gf/arf group as a whole
2406 gf_group_bits = calculate_total_gf_group_bits(cpi, gf_stats.gf_group_err);
2407 p_rc->gf_group_bits = gf_group_bits;
2408
2409 #if GROUP_ADAPTIVE_MAXQ
2410 // Calculate an estimate of the maxq needed for the group.
// We are more aggressive about correcting for sections
2412 // where there could be significant overshoot than for easier
2413 // sections where we do not wish to risk creating an overshoot
2414 // of the allocated bit budget.
2415 if ((rc_cfg->mode != AOM_Q) && (p_rc->baseline_gf_interval > 1) &&
2416 is_final_pass) {
2417 const int vbr_group_bits_per_frame =
2418 (int)(gf_group_bits / p_rc->baseline_gf_interval);
2419 const double group_av_err =
2420 gf_stats.gf_group_raw_error / p_rc->baseline_gf_interval;
2421 const double group_av_skip_pct =
2422 gf_stats.gf_group_skip_pct / p_rc->baseline_gf_interval;
2423 const double group_av_inactive_zone =
2424 ((gf_stats.gf_group_inactive_zone_rows * 2) /
2425 (p_rc->baseline_gf_interval * (double)cm->mi_params.mb_rows));
2426
2427 int tmp_q;
2428 tmp_q = get_twopass_worst_quality(
2429 cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
2430 vbr_group_bits_per_frame);
2431 rc->active_worst_quality = AOMMAX(tmp_q, rc->active_worst_quality >> 1);
2432 }
2433 #endif
2434
2435 // Adjust KF group bits and error remaining.
2436 if (is_final_pass) twopass->kf_group_error_left -= gf_stats.gf_group_err;
2437
2438 // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
2439 av1_gop_setup_structure(cpi);
2440
2441 // Reset the file position.
2442 reset_fpf_position(&cpi->twopass_frame, start_pos);
2443
2444 // Calculate a section intra ratio used in setting max loop filter.
2445 if (rc->frames_since_key != 0) {
2446 twopass->section_intra_rating = calculate_section_intra_ratio(
2447 start_pos, twopass->stats_buf_ctx->stats_in_end,
2448 p_rc->baseline_gf_interval);
2449 }
2450
2451 av1_gop_bit_allocation(cpi, rc, gf_group, rc->frames_since_key == 0,
2452 use_alt_ref, gf_group_bits);
2453
2454 frame_params->frame_type =
2455 rc->frames_since_key == 0 ? KEY_FRAME : INTER_FRAME;
2456 frame_params->show_frame =
2457 !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
2458 gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
2459
2460 // TODO(jingning): Generalize this condition.
2461 if (is_final_pass) {
2462 cpi->ppi->gf_state.arf_gf_boost_lst = use_alt_ref;
2463
2464 // Reset rolling actual and target bits counters for ARF groups.
2465 twopass->rolling_arf_group_target_bits = 1;
2466 twopass->rolling_arf_group_actual_bits = 1;
2467 }
2468 #if CONFIG_BITRATE_ACCURACY
2469 if (is_final_pass) {
2470 vbr_rc_set_gop_bit_budget(&cpi->vbr_rc_info, p_rc->baseline_gf_interval);
2471 }
2472 #endif
2473 }
2474
2475 // #define FIXED_ARF_BITS
2476 #ifdef FIXED_ARF_BITS
2477 #define ARF_BITS_FRACTION 0.75
2478 #endif
// Distribute the GF group's total bit budget across its frames, carving out
// an extra share for the boosted (key/ARF) frame(s) first.
void av1_gop_bit_allocation(const AV1_COMP *cpi, RATE_CONTROL *const rc,
                            GF_GROUP *gf_group, int is_key_frame, int use_arf,
                            int64_t gf_group_bits) {
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  // Work out how many extra bits the boosted frame(s) should receive.
#ifdef FIXED_ARF_BITS
  int boost_bits = (int)(ARF_BITS_FRACTION * gf_group_bits);
#else
  const int frame_count =
      p_rc->baseline_gf_interval - (rc->frames_since_key == 0);
  int boost_bits =
      calculate_boost_bits(frame_count, p_rc->gfu_boost, gf_group_bits);
#endif

  // Rescale the boost share to respect the configured target level.
  boost_bits =
      adjust_boost_bits_for_target_level(cpi, rc, boost_bits, gf_group_bits, 1);

  // Hand out per-frame budgets within the group.
  allocate_gf_group_bits(gf_group, p_rc, rc, gf_group_bits, boost_bits,
                         is_key_frame, use_arf);
}
2499
2500 // Minimum % intra coding observed in first pass (1.0 = 100%)
2501 #define MIN_INTRA_LEVEL 0.25
2502 // Minimum ratio between the % of intra coding and inter coding in the first
2503 // pass after discounting neutral blocks (discounting neutral blocks in this
2504 // way helps catch scene cuts in clips with very flat areas or letter box
// format clips with image padding).
2506 #define INTRA_VS_INTER_THRESH 2.0
2507 // Hard threshold where the first pass chooses intra for almost all blocks.
2508 // In such a case even if the frame is not a scene cut coding a key frame
2509 // may be a good option.
2510 #define VERY_LOW_INTER_THRESH 0.05
2511 // Maximum threshold for the relative ratio of intra error score vs best
2512 // inter error score.
2513 #define KF_II_ERR_THRESHOLD 1.9
2514 // In real scene cuts there is almost always a sharp change in the intra
2515 // or inter error score.
2516 #define ERR_CHANGE_THRESHOLD 0.4
// For real scene cuts we expect an improvement in the intra inter error
2518 // ratio in the next frame.
2519 #define II_IMPROVEMENT_THRESHOLD 3.5
2520 #define KF_II_MAX 128.0
2521 // Intra / Inter threshold very low
2522 #define VERY_LOW_II 1.5
// For clean slide transitions we expect a sharp single-frame spike in error.
2524 #define ERROR_SPIKE 5.0
2525
2526 // Slide show transition detection.
2527 // Tests for case where there is very low error either side of the current frame
2528 // but much higher just for this frame. This can help detect key frames in
2529 // slide shows even where the slides are pictures of different sizes.
2530 // Also requires that intra and inter errors are very similar to help eliminate
2531 // harmful false positives.
2532 // It will not help if the transition is a fade or other multi-frame effect.
slide_transition(const FIRSTPASS_STATS * this_frame,const FIRSTPASS_STATS * last_frame,const FIRSTPASS_STATS * next_frame)2533 static int slide_transition(const FIRSTPASS_STATS *this_frame,
2534 const FIRSTPASS_STATS *last_frame,
2535 const FIRSTPASS_STATS *next_frame) {
2536 return (this_frame->intra_error < (this_frame->coded_error * VERY_LOW_II)) &&
2537 (this_frame->coded_error > (last_frame->coded_error * ERROR_SPIKE)) &&
2538 (this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
2539 }
2540
2541 // Threshold for use of the lagging second reference frame. High second ref
2542 // usage may point to a transient event like a flash or occlusion rather than
2543 // a real scene cut.
2544 // We adapt the threshold based on number of frames in this key-frame group so
2545 // far.
// Second-reference usage threshold, ramped up linearly over the first
// kAdaptFrames frames of the key-frame group and capped thereafter.
static double get_second_ref_usage_thresh(int frame_count_so_far) {
  const int kAdaptFrames = 32;
  const double kBaseThresh = 0.085;
  const double kMaxDelta = 0.035;
  // Fully adapted once enough frames have been seen in this kf group.
  if (frame_count_so_far >= kAdaptFrames) return kBaseThresh + kMaxDelta;
  // Linear ramp from kBaseThresh towards kBaseThresh + kMaxDelta.
  const double ramp = (double)frame_count_so_far / (kAdaptFrames - 1);
  return kBaseThresh + ramp * kMaxDelta;
}
2557
// Tests whether the frame at `this_stats_index` is a viable key-frame
// candidate. First applies the primary criteria (low second-ref usage,
// intra/inter balance, slide-transition or error-change tests); if those
// pass, scans subsequent frames to check the candidate would be a good
// predictor for them. Returns 1 if the frame should be treated as a key
// frame, 0 otherwise.
static int test_candidate_kf(const FIRSTPASS_INFO *firstpass_info,
                             int this_stats_index, int frame_count_so_far,
                             enum aom_rc_mode rc_mode, int scenecut_mode,
                             int num_mbs) {
  const FIRSTPASS_STATS *last_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index - 1);
  const FIRSTPASS_STATS *this_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index);
  const FIRSTPASS_STATS *next_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index + 1);
  // Need the previous, current and next frame stats to run the tests.
  if (last_stats == NULL || this_stats == NULL || next_stats == NULL) {
    return 0;
  }

  int is_viable_kf = 0;
  double pcnt_intra = 1.0 - this_stats->pcnt_inter;
  // Inter coverage after discounting neutral (flat/ambiguous) blocks.
  double modified_pcnt_inter =
      this_stats->pcnt_inter - this_stats->pcnt_neutral;
  const double second_ref_usage_thresh =
      get_second_ref_usage_thresh(frame_count_so_far);
  int frames_to_test_after_candidate_key = SCENE_CUT_KEY_TEST_INTERVAL;
  int count_for_tolerable_prediction = 3;

  // We do "-1" because the candidate key is not counted.
  int stats_after_this_stats =
      av1_firstpass_info_future_count(firstpass_info, this_stats_index) - 1;

  // Mode 1 uses a shorter confirmation window and a looser prediction
  // requirement, but needs at least 3 future stats to run at all.
  if (scenecut_mode == ENABLE_SCENECUT_MODE_1) {
    if (stats_after_this_stats < 3) {
      return 0;
    } else {
      frames_to_test_after_candidate_key = 3;
      count_for_tolerable_prediction = 1;
    }
  }
  // Make sure we have enough stats after the candidate key.
  frames_to_test_after_candidate_key =
      AOMMIN(frames_to_test_after_candidate_key, stats_after_this_stats);

  // Does the frame satisfy the primary criteria of a key frame?
  // See above for an explanation of the test criteria.
  // If so, then examine how well it predicts subsequent frames.
  if (IMPLIES(rc_mode == AOM_Q, frame_count_so_far >= 3) &&
      (this_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      (next_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      ((this_stats->pcnt_inter < VERY_LOW_INTER_THRESH) ||
       slide_transition(this_stats, last_stats, next_stats) ||
       ((pcnt_intra > MIN_INTRA_LEVEL) &&
        (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
        ((this_stats->intra_error /
          DOUBLE_DIVIDE_CHECK(this_stats->coded_error)) <
         KF_II_ERR_THRESHOLD) &&
        ((fabs(last_stats->coded_error - this_stats->coded_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->coded_error) >
          ERR_CHANGE_THRESHOLD) ||
         (fabs(last_stats->intra_error - this_stats->intra_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->intra_error) >
          ERR_CHANGE_THRESHOLD) ||
         ((next_stats->intra_error /
           DOUBLE_DIVIDE_CHECK(next_stats->coded_error)) >
          II_IMPROVEMENT_THRESHOLD))))) {
    int i;
    double boost_score = 0.0;
    double old_boost_score = 0.0;
    double decay_accumulator = 1.0;

    // Examine how well the key frame predicts subsequent frames.
    for (i = 1; i <= frames_to_test_after_candidate_key; ++i) {
      // Get the next frame details
      // (peek cannot fail here: i is bounded by stats_after_this_stats).
      const FIRSTPASS_STATS *local_next_frame =
          av1_firstpass_info_peek(firstpass_info, this_stats_index + i);
      double next_iiratio =
          (BOOST_FACTOR * local_next_frame->intra_error /
           DOUBLE_DIVIDE_CHECK(local_next_frame->coded_error));

      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;

      // Cumulative effect of decay in prediction quality.
      if (local_next_frame->pcnt_inter > 0.85)
        decay_accumulator *= local_next_frame->pcnt_inter;
      else
        decay_accumulator *= (0.85 + local_next_frame->pcnt_inter) / 2.0;

      // Keep a running total.
      boost_score += (decay_accumulator * next_iiratio);

      // Test various breakout clauses.
      // TODO(any): Test of intra error should be normalized to an MB.
      if ((local_next_frame->pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
          (((local_next_frame->pcnt_inter - local_next_frame->pcnt_neutral) <
            0.20) &&
           (next_iiratio < 3.0)) ||
          ((boost_score - old_boost_score) < 3.0) ||
          (local_next_frame->intra_error < (200.0 / (double)num_mbs))) {
        break;
      }

      old_boost_score = boost_score;
    }

    // If there is tolerable prediction for at least the next 3 frames then
    // break out else discard this potential key frame and move on
    if (boost_score > 30.0 && (i > count_for_tolerable_prediction)) {
      is_viable_kf = 1;
    } else {
      is_viable_kf = 0;
    }
  }
  return is_viable_kf;
}
2668
2669 #define FRAMES_TO_CHECK_DECAY 8
2670 #define KF_MIN_FRAME_BOOST 80.0
2671 #define KF_MAX_FRAME_BOOST 128.0
2672 #define MIN_KF_BOOST 600 // Minimum boost for non-static KF interval
2673 #define MAX_KF_BOOST 3200
2674 #define MIN_STATIC_KF_BOOST 5400 // Minimum boost for static KF interval
2675
// Returns the number of frames to an application-forced key frame pending in
// the lookahead queue; callers treat -1 as "none pending" (see
// define_kf_interval).
static int detect_app_forced_key(AV1_COMP *cpi) {
  return is_forced_keyframe_pending(cpi->ppi->lookahead,
                                    cpi->ppi->lookahead->max_sz,
                                    cpi->compressor_stage);
}
2681
// Project the previously computed kf_boost forward to cover the full
// key-frame interval when only part of the stats were available for the
// original boost calculation.
static int get_projected_kf_boost(AV1_COMP *cpi) {
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  // If num_stats_used_for_kf_boost >= frames_to_key, all stats needed for
  // the prior boost calculation were available, so no projection is needed.
  if (p_rc->num_stats_used_for_kf_boost >= cpi->rc.frames_to_key)
    return p_rc->kf_boost;

  // Scale the stored boost by the ratio of the projection factor for the
  // full interval to the factor for the number of stats actually used.
  const double full_factor =
      av1_get_kf_boost_projection_factor(cpi->rc.frames_to_key);
  const double used_factor =
      av1_get_kf_boost_projection_factor(p_rc->num_stats_used_for_kf_boost);
  return (int)rint((full_factor * p_rc->kf_boost) / used_factor);
}
2700
2701 /*!\brief Determine the location of the next key frame
2702 *
2703 * \ingroup gf_group_algo
2704 * This function decides the placement of the next key frame when a
2705 * scenecut is detected or the maximum key frame distance is reached.
2706 *
2707 * \param[in] cpi Top-level encoder structure
2708 * \param[in] firstpass_info struct for firstpass info
2709 * \param[in] num_frames_to_detect_scenecut Maximum lookahead frames.
2710 * \param[in] search_start_idx the start index for searching key frame.
2711 * Set it to one if we already know the
2712 * current frame is key frame. Otherwise,
2713 * set it to zero.
2714 *
2715 * \return Number of frames to the next key including the current frame.
2716 */
static int define_kf_interval(AV1_COMP *cpi,
                              const FIRSTPASS_INFO *firstpass_info,
                              int num_frames_to_detect_scenecut,
                              int search_start_idx) {
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  // Ring buffer of the most recent per-frame prediction decay rates.
  double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
  double decay_accumulator = 1.0;
  int i = 0, j;
  int frames_to_key = search_start_idx;
  int frames_since_key = rc->frames_since_key + 1;
  // NOTE(review): this counter is incremented in the loop below but never
  // read within this function.
  int num_stats_used_for_kf_boost = 1;
  int scenecut_detected = 0;

  // Distance to an application-forced key frame, or -1 if none is pending.
  int num_frames_to_next_key = detect_app_forced_key(cpi);

  // With no scene-cut search window, fall back to the forced-key distance
  // (if any) or the currently scheduled key-frame distance.
  if (num_frames_to_detect_scenecut == 0) {
    if (num_frames_to_next_key != -1)
      return num_frames_to_next_key;
    else
      return rc->frames_to_key;
  }

  // Never search past an application-forced key frame.
  if (num_frames_to_next_key != -1)
    num_frames_to_detect_scenecut =
        AOMMIN(num_frames_to_detect_scenecut, num_frames_to_next_key);

  // Initialize the decay rates for the recent frames to check
  for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;

  i = 0;
  // Frame size in MBs; use the pre-resize count when resizing is enabled.
  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  const int future_stats_count =
      av1_firstpass_info_future_count(firstpass_info, 0);
  // Scan forward one frame at a time until a scene cut is found, the search
  // window ends, or the available first-pass stats run out.
  while (frames_to_key < future_stats_count &&
         frames_to_key < num_frames_to_detect_scenecut) {
    // Accumulate total number of stats available till next key frame
    num_stats_used_for_kf_boost++;

    // Provided that we are not at the end of the file...
    if ((cpi->ppi->p_rc.enable_scenecut_detection > 0) && kf_cfg->auto_key &&
        frames_to_key + 1 < future_stats_count) {
      double loop_decay_rate;

      // Check for a scene cut.
      // Only once the minimum key-frame spacing has been reached.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = test_candidate_kf(
            &twopass->firstpass_info, frames_to_key, frames_since_key,
            oxcf->rc_cfg.mode, cpi->ppi->p_rc.enable_scenecut_detection,
            num_mbs);
        if (scenecut_detected) {
          break;
        }
      }

      // How fast is the prediction quality decaying?
      const FIRSTPASS_STATS *next_stats =
          av1_firstpass_info_peek(firstpass_info, frames_to_key + 1);
      loop_decay_rate = get_prediction_decay_rate(next_stats);

      // We want to know something about the recent past... rather than
      // as used elsewhere where we are concerned with decay in prediction
      // quality since the last GF or KF.
      recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
      decay_accumulator = 1.0;
      // Product of the decay rates over the last FRAMES_TO_CHECK_DECAY frames.
      for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
        decay_accumulator *= recent_loop_decay[j];

      // Special check for transition or high motion followed by a
      // static scene.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = detect_transition_to_still(
            firstpass_info, frames_to_key + 1, rc->min_gf_interval, i,
            kf_cfg->key_freq_max - i, loop_decay_rate, decay_accumulator);
        if (scenecut_detected) {
          // In the case of transition followed by a static scene, the key frame
          // could be a good predictor for the following frames, therefore we
          // do not use an arf.
          p_rc->use_arf_in_this_kf_group = 0;
          break;
        }
      }

      // Step on to the next frame.
      ++frames_to_key;
      ++frames_since_key;

      // If we don't have a real key frame within the next two
      // key_freq_max intervals then break out of the loop.
      if (frames_to_key >= 2 * kf_cfg->key_freq_max) {
        break;
      }
    } else {
      ++frames_to_key;
      ++frames_since_key;
    }
    ++i;
  }
  // Under LAP with no detected scene cut, fall back to the forced-key
  // distance; note this may be -1 (handled by the caller).
  if (cpi->ppi->lap_enabled && !scenecut_detected)
    frames_to_key = num_frames_to_next_key;

  return frames_to_key;
}
2825
// Average coded error over the frames of the upcoming key-frame group.
// Rewinds the first-pass stats to `start_position`, then accumulates up to
// `frames_to_key` coded-error values (fewer if the stats run out).
static double get_kf_group_avg_error(TWO_PASS *twopass,
                                     TWO_PASS_FRAME *twopass_frame,
                                     const FIRSTPASS_STATS *first_frame,
                                     const FIRSTPASS_STATS *start_position,
                                     int frames_to_key) {
  FIRSTPASS_STATS frame = *first_frame;
  double total_error = 0.0;
  int idx;

  reset_fpf_position(twopass_frame, start_position);

  for (idx = 0; idx < frames_to_key; ++idx) {
    total_error += frame.coded_error;
    if (input_stats(twopass, twopass_frame, &frame) == EOF) break;
  }

  // idx stops either at frames_to_key (all stats read) or at the index where
  // EOF was hit; in both cases idx + 1 capped to frames_to_key equals the
  // number of accumulated error terms.
  const int frame_count = AOMMIN(idx + 1, frames_to_key);
  return total_error / frame_count;
}
2847
// Compute the total bit budget for the upcoming key-frame group.
// Without lookahead (two-pass), the group gets a share of the remaining bits
// proportional to its modified error. With lookahead (LAP), the budget is
// rate-based (frames * average frame bandwidth), optionally scaled by the
// group's average error relative to the configured corpus complexity.
static int64_t get_kf_group_bits(AV1_COMP *cpi, double kf_group_err,
                                 double kf_group_avg_error) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;

  if (!cpi->ppi->lap_enabled) {
    // Share of the remaining bits proportional to this group's error.
    return (int64_t)(twopass->bits_left *
                     (kf_group_err / twopass->modified_error_left));
  }

  int64_t bits = (int64_t)rc->frames_to_key * rc->avg_frame_bandwidth;
  if (cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap) {
    // The configured value is divided by 10 to obtain the average corpus
    // complexity used for scaling.
    const double corpus_complexity =
        cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap / 10.0;
    bits = (int64_t)(bits * (kf_group_avg_error / corpus_complexity));
  }
  return bits;
}
2869
// Accumulate first-pass stats over the remaining frames of the key-frame
// group and reduce them to a per-frame average in `avg_frame_stat`.
// Returns the number of frames accumulated. When fewer than 2 frames are
// available, the early return leaves `avg_frame_stat` holding raw totals
// rather than averages.
static int calc_avg_stats(AV1_COMP *cpi, FIRSTPASS_STATS *avg_frame_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS cur_frame;
  av1_zero(cur_frame);
  int num_frames = 0;
  // Sum the stats of every available frame up to the next key frame.
  for (num_frames = 0; num_frames < (rc->frames_to_key - 1); ++num_frames) {
    if (EOF == input_stats(twopass, &cpi->twopass_frame, &cur_frame)) break;
    av1_accumulate_stats(avg_frame_stat, &cur_frame);
  }

  if (num_frames < 2) {
    return num_frames;
  }

  // Convert the accumulated totals into per-frame averages.
  avg_frame_stat->weight /= num_frames;
  avg_frame_stat->intra_error /= num_frames;
  avg_frame_stat->frame_avg_wavelet_energy /= num_frames;
  avg_frame_stat->coded_error /= num_frames;
  avg_frame_stat->sr_coded_error /= num_frames;
  avg_frame_stat->pcnt_inter /= num_frames;
  avg_frame_stat->pcnt_motion /= num_frames;
  avg_frame_stat->pcnt_second_ref /= num_frames;
  avg_frame_stat->pcnt_neutral /= num_frames;
  avg_frame_stat->intra_skip_pct /= num_frames;
  avg_frame_stat->inactive_zone_rows /= num_frames;
  avg_frame_stat->inactive_zone_cols /= num_frames;
  avg_frame_stat->MVr /= num_frames;
  avg_frame_stat->mvr_abs /= num_frames;
  avg_frame_stat->MVc /= num_frames;
  avg_frame_stat->mvc_abs /= num_frames;
  avg_frame_stat->MVrv /= num_frames;
  avg_frame_stat->MVcv /= num_frames;
  avg_frame_stat->mv_in_out_count /= num_frames;
  avg_frame_stat->new_mv_count /= num_frames;
  avg_frame_stat->count /= num_frames;
  avg_frame_stat->duration /= num_frames;

  return num_frames;
}
2916
// Accumulates the boost score for the key frame by scanning the frames of
// the kf group (or, when use_avg_stat is set, a single averaged stat from
// calc_avg_stats). Updates *zero_motion_accumulator (running minimum of the
// static-scene factor) and *sr_accumulator (second-ref error accumulator,
// advanced by calc_kf_frame_boost) as side effects.
static double get_kf_boost_score(AV1_COMP *cpi, double kf_raw_err,
                                 double *zero_motion_accumulator,
                                 double *sr_accumulator, int use_avg_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  FIRSTPASS_STATS frame_stat;
  av1_zero(frame_stat);
  int i = 0, num_stat_used = 0;
  double boost_score = 0.0;
  // Per-frame boost cap; under AOM_Q it scales with the kf interval,
  // clamped to [KF_MIN_FRAME_BOOST, KF_MAX_FRAME_BOOST].
  const double kf_max_boost =
      cpi->oxcf.rc_cfg.mode == AOM_Q
          ? AOMMIN(AOMMAX(rc->frames_to_key * 2.0, KF_MIN_FRAME_BOOST),
                   KF_MAX_FRAME_BOOST)
          : KF_MAX_FRAME_BOOST;

  // Calculate the average using available number of stats.
  if (use_avg_stat) num_stat_used = calc_avg_stats(cpi, &frame_stat);

  // With use_avg_stat the loop iterates over the remaining count using the
  // same averaged frame_stat each time; otherwise it reads one stat per
  // iteration from the first-pass file.
  for (i = num_stat_used; i < (rc->frames_to_key - 1); ++i) {
    if (!use_avg_stat &&
        EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stat))
      break;

    // Monitor for static sections.
    // For the first frame in kf group, the second ref indicator is invalid.
    if (i > 0) {
      *zero_motion_accumulator =
          AOMMIN(*zero_motion_accumulator, get_zero_motion_factor(&frame_stat));
    } else {
      *zero_motion_accumulator = frame_stat.pcnt_inter - frame_stat.pcnt_motion;
    }

    // Not all frames in the group are necessarily used in calculating boost.
    if ((*sr_accumulator < (kf_raw_err * 1.50)) &&
        (i <= rc->max_gf_interval * 2)) {
      double frame_boost;
      double zm_factor;

      // Factor 0.75-1.25 based on how much of frame is static.
      zm_factor = (0.75 + (*zero_motion_accumulator / 2.0));

      // The second-ref accumulator is only meaningful from the third frame on.
      if (i < 2) *sr_accumulator = 0.0;
      frame_boost =
          calc_kf_frame_boost(&cpi->ppi->p_rc, frame_info, &frame_stat,
                              sr_accumulator, kf_max_boost);
      boost_score += frame_boost * zm_factor;
    }
  }
  return boost_score;
}
2968
2969 /*!\brief Interval(in seconds) to clip key-frame distance to in LAP.
2970 */
2971 #define MAX_KF_BITS_INTERVAL_SINGLE_PASS 5
2972
2973 /*!\brief Determine the next key frame group
2974 *
2975 * \ingroup gf_group_algo
2976 * This function decides the placement of the next key frame, and
2977 * calculates the bit allocation of the KF group and the keyframe itself.
2978 *
2979 * \param[in] cpi Top-level encoder structure
2980 * \param[in] this_frame Pointer to first pass stats
2981 *
2982 * \return Nothing is returned.
2983 */
find_next_key_frame(AV1_COMP * cpi,FIRSTPASS_STATS * this_frame)2984 static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
2985 RATE_CONTROL *const rc = &cpi->rc;
2986 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2987 TWO_PASS *const twopass = &cpi->ppi->twopass;
2988 GF_GROUP *const gf_group = &cpi->ppi->gf_group;
2989 FRAME_INFO *const frame_info = &cpi->frame_info;
2990 AV1_COMMON *const cm = &cpi->common;
2991 CurrentFrame *const current_frame = &cm->current_frame;
2992 const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2993 const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
2994 const FIRSTPASS_STATS first_frame = *this_frame;
2995 FIRSTPASS_STATS next_frame;
2996 const FIRSTPASS_INFO *firstpass_info = &twopass->firstpass_info;
2997 av1_zero(next_frame);
2998
2999 rc->frames_since_key = 0;
3000 // Use arfs if possible.
3001 p_rc->use_arf_in_this_kf_group = is_altref_enabled(
3002 oxcf->gf_cfg.lag_in_frames, oxcf->gf_cfg.enable_auto_arf);
3003
3004 // Reset the GF group data structures.
3005 av1_zero(*gf_group);
3006 cpi->gf_frame_index = 0;
3007
3008 // KF is always a GF so clear frames till next gf counter.
3009 rc->frames_till_gf_update_due = 0;
3010
3011 if (has_no_stats_stage(cpi)) {
3012 int num_frames_to_app_forced_key = detect_app_forced_key(cpi);
3013 p_rc->this_key_frame_forced =
3014 current_frame->frame_number != 0 && rc->frames_to_key == 0;
3015 if (num_frames_to_app_forced_key != -1)
3016 rc->frames_to_key = num_frames_to_app_forced_key;
3017 else
3018 rc->frames_to_key = AOMMAX(1, kf_cfg->key_freq_max);
3019 correct_frames_to_key(cpi);
3020 p_rc->kf_boost = DEFAULT_KF_BOOST;
3021 gf_group->update_type[0] = KF_UPDATE;
3022 return;
3023 }
3024 int i;
3025 const FIRSTPASS_STATS *const start_position = cpi->twopass_frame.stats_in;
3026 int kf_bits = 0;
3027 double zero_motion_accumulator = 1.0;
3028 double boost_score = 0.0;
3029 double kf_raw_err = 0.0;
3030 double kf_mod_err = 0.0;
3031 double sr_accumulator = 0.0;
3032 double kf_group_avg_error = 0.0;
3033 int frames_to_key, frames_to_key_clipped = INT_MAX;
3034 int64_t kf_group_bits_clipped = INT64_MAX;
3035
3036 // Is this a forced key frame by interval.
3037 p_rc->this_key_frame_forced = p_rc->next_key_frame_forced;
3038
3039 twopass->kf_group_bits = 0; // Total bits available to kf group
3040 twopass->kf_group_error_left = 0; // Group modified error score.
3041
3042 kf_raw_err = this_frame->intra_error;
3043 kf_mod_err = calculate_modified_err(frame_info, twopass, oxcf, this_frame);
3044
3045 // We assume the current frame is a key frame and we are looking for the next
3046 // key frame. Therefore search_start_idx = 1
3047 frames_to_key = define_kf_interval(cpi, firstpass_info, kf_cfg->key_freq_max,
3048 /*search_start_idx=*/1);
3049
3050 if (frames_to_key != -1) {
3051 rc->frames_to_key = AOMMIN(kf_cfg->key_freq_max, frames_to_key);
3052 } else {
3053 rc->frames_to_key = kf_cfg->key_freq_max;
3054 }
3055
3056 rc->frames_to_fwd_kf = kf_cfg->fwd_kf_dist;
3057
3058 if (cpi->ppi->lap_enabled) correct_frames_to_key(cpi);
3059
3060 // If there is a max kf interval set by the user we must obey it.
3061 // We already breakout of the loop above at 2x max.
3062 // This code centers the extra kf if the actual natural interval
3063 // is between 1x and 2x.
3064 if (kf_cfg->auto_key && rc->frames_to_key > kf_cfg->key_freq_max) {
3065 FIRSTPASS_STATS tmp_frame = first_frame;
3066
3067 rc->frames_to_key /= 2;
3068
3069 // Reset to the start of the group.
3070 reset_fpf_position(&cpi->twopass_frame, start_position);
3071 // Rescan to get the correct error data for the forced kf group.
3072 for (i = 0; i < rc->frames_to_key; ++i) {
3073 if (EOF == input_stats(twopass, &cpi->twopass_frame, &tmp_frame)) break;
3074 }
3075 p_rc->next_key_frame_forced = 1;
3076 } else if ((cpi->twopass_frame.stats_in ==
3077 twopass->stats_buf_ctx->stats_in_end &&
3078 is_stat_consumption_stage_twopass(cpi)) ||
3079 rc->frames_to_key >= kf_cfg->key_freq_max) {
3080 p_rc->next_key_frame_forced = 1;
3081 } else {
3082 p_rc->next_key_frame_forced = 0;
3083 }
3084
3085 double kf_group_err = 0;
3086 for (i = 0; i < rc->frames_to_key; ++i) {
3087 const FIRSTPASS_STATS *this_stats =
3088 av1_firstpass_info_peek(&twopass->firstpass_info, i);
3089 if (this_stats != NULL) {
3090 // Accumulate kf group error.
3091 kf_group_err += calculate_modified_err_new(
3092 frame_info, &firstpass_info->total_stats, this_stats,
3093 oxcf->rc_cfg.vbrbias, twopass->modified_error_min,
3094 twopass->modified_error_max);
3095 ++p_rc->num_stats_used_for_kf_boost;
3096 }
3097 }
3098
3099 // Calculate the number of bits that should be assigned to the kf group.
3100 if ((twopass->bits_left > 0 && twopass->modified_error_left > 0.0) ||
3101 (cpi->ppi->lap_enabled && oxcf->rc_cfg.mode != AOM_Q)) {
3102 // Maximum number of bits for a single normal frame (not key frame).
3103 const int max_bits = frame_max_bits(rc, oxcf);
3104
3105 // Maximum number of bits allocated to the key frame group.
3106 int64_t max_grp_bits;
3107
3108 if (oxcf->rc_cfg.vbr_corpus_complexity_lap) {
3109 kf_group_avg_error =
3110 get_kf_group_avg_error(twopass, &cpi->twopass_frame, &first_frame,
3111 start_position, rc->frames_to_key);
3112 }
3113
3114 // Default allocation based on bits left and relative
3115 // complexity of the section.
3116 twopass->kf_group_bits =
3117 get_kf_group_bits(cpi, kf_group_err, kf_group_avg_error);
3118 // Clip based on maximum per frame rate defined by the user.
3119 max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
3120 if (twopass->kf_group_bits > max_grp_bits)
3121 twopass->kf_group_bits = max_grp_bits;
3122 } else {
3123 twopass->kf_group_bits = 0;
3124 }
3125 twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);
3126
3127 if (cpi->ppi->lap_enabled) {
3128 // In the case of single pass based on LAP, frames to key may have an
3129 // inaccurate value, and hence should be clipped to an appropriate
3130 // interval.
3131 frames_to_key_clipped =
3132 (int)(MAX_KF_BITS_INTERVAL_SINGLE_PASS * cpi->framerate);
3133
3134 // This variable calculates the bits allocated to kf_group with a clipped
3135 // frames_to_key.
3136 if (rc->frames_to_key > frames_to_key_clipped) {
3137 kf_group_bits_clipped =
3138 (int64_t)((double)twopass->kf_group_bits * frames_to_key_clipped /
3139 rc->frames_to_key);
3140 }
3141 }
3142
3143 // Reset the first pass file position.
3144 reset_fpf_position(&cpi->twopass_frame, start_position);
3145
3146 // Scan through the kf group collating various stats used to determine
3147 // how many bits to spend on it.
3148 boost_score = get_kf_boost_score(cpi, kf_raw_err, &zero_motion_accumulator,
3149 &sr_accumulator, 0);
3150 reset_fpf_position(&cpi->twopass_frame, start_position);
3151 // Store the zero motion percentage
3152 twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
3153
3154 // Calculate a section intra ratio used in setting max loop filter.
3155 twopass->section_intra_rating = calculate_section_intra_ratio(
3156 start_position, twopass->stats_buf_ctx->stats_in_end, rc->frames_to_key);
3157
3158 p_rc->kf_boost = (int)boost_score;
3159
3160 if (cpi->ppi->lap_enabled) {
3161 if (oxcf->rc_cfg.mode == AOM_Q) {
3162 p_rc->kf_boost = get_projected_kf_boost(cpi);
3163 } else {
3164 // TODO(any): Explore using average frame stats for AOM_Q as well.
3165 boost_score = get_kf_boost_score(
3166 cpi, kf_raw_err, &zero_motion_accumulator, &sr_accumulator, 1);
3167 reset_fpf_position(&cpi->twopass_frame, start_position);
3168 p_rc->kf_boost += (int)boost_score;
3169 }
3170 }
3171
3172 // Special case for static / slide show content but don't apply
3173 // if the kf group is very short.
3174 if ((zero_motion_accumulator > STATIC_KF_GROUP_FLOAT_THRESH) &&
3175 (rc->frames_to_key > 8)) {
3176 p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_STATIC_KF_BOOST);
3177 } else {
3178 // Apply various clamps for min and max boost
3179 p_rc->kf_boost = AOMMAX(p_rc->kf_boost, (rc->frames_to_key * 3));
3180 p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_KF_BOOST);
3181 #ifdef STRICT_RC
3182 p_rc->kf_boost = AOMMIN(p_rc->kf_boost, MAX_KF_BOOST);
3183 #endif
3184 }
3185
3186 // Work out how many bits to allocate for the key frame itself.
3187 // In case of LAP enabled for VBR, if the frames_to_key value is
3188 // very high, we calculate the bits based on a clipped value of
3189 // frames_to_key.
3190 kf_bits = calculate_boost_bits(
3191 AOMMIN(rc->frames_to_key, frames_to_key_clipped) - 1, p_rc->kf_boost,
3192 AOMMIN(twopass->kf_group_bits, kf_group_bits_clipped));
3193 // printf("kf boost = %d kf_bits = %d kf_zeromotion_pct = %d\n",
3194 // p_rc->kf_boost,
3195 // kf_bits, twopass->kf_zeromotion_pct);
3196 kf_bits = adjust_boost_bits_for_target_level(cpi, rc, kf_bits,
3197 twopass->kf_group_bits, 0);
3198
3199 twopass->kf_group_bits -= kf_bits;
3200
3201 // Save the bits to spend on the key frame.
3202 gf_group->bit_allocation[0] = kf_bits;
3203 gf_group->update_type[0] = KF_UPDATE;
3204
3205 // Note the total error score of the kf group minus the key frame itself.
3206 if (cpi->ppi->lap_enabled)
3207 // As we don't have enough stats to know the actual error of the group,
3208 // we assume the complexity of each frame to be equal to 1, and set the
3209 // error as the number of frames in the group(minus the keyframe).
3210 twopass->kf_group_error_left = (double)(rc->frames_to_key - 1);
3211 else
3212 twopass->kf_group_error_left = kf_group_err - kf_mod_err;
3213
3214 // Adjust the count of total modified error left.
3215 // The count of bits left is adjusted elsewhere based on real coded frame
3216 // sizes.
3217 twopass->modified_error_left -= kf_group_err;
3218 }
3219
3220 #define ARF_STATS_OUTPUT 0
3221 #if ARF_STATS_OUTPUT
3222 unsigned int arf_count = 0;
3223 #endif
3224
// Returns the target bandwidth (bits per frame) for the remainder of the
// section: the configured average frame bandwidth when look-ahead (LAP)
// is enabled, otherwise the remaining two pass bit budget spread evenly
// over the remaining first pass frames.
static int get_section_target_bandwidth(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  int section_target_bandwidth;
  int frames_left = (int)(twopass->stats_buf_ctx->total_stats->count -
                          current_frame->frame_number);
  // Clamp to at least one frame: once the encoded frame count reaches the
  // first pass stats count the subtraction yields 0 and the division below
  // would be undefined behavior.
  if (frames_left < 1) frames_left = 1;
  if (cpi->ppi->lap_enabled)
    section_target_bandwidth = (int)rc->avg_frame_bandwidth;
  else
    section_target_bandwidth = (int)(twopass->bits_left / frames_left);
  return section_target_bandwidth;
}
3239
// Derives per-frame two pass parameters (log-domain average MB energy,
// average wavelet energy and content type flag) from one first pass
// stats entry. No-op when this_frame_ptr is NULL.
static INLINE void set_twopass_params_based_on_fp_stats(
    AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame_ptr) {
  if (this_frame_ptr == NULL) return;

  TWO_PASS_FRAME *twopass_frame = &cpi->twopass_frame;
  // Log of the frame's intra error; the +1.0 guards against log(0).
  twopass_frame->mb_av_energy = log((this_frame_ptr->intra_error) + 1.0);

  const FIRSTPASS_STATS *const total_stats =
      cpi->ppi->twopass.stats_buf_ctx->total_stats;
  // Only use the wavelet (haar) energy when the first pass produced a
  // valid clip-level value.
  if (is_fp_wavelet_energy_invalid(total_stats) == 0) {
    twopass_frame->frame_avg_haar_energy =
        log((this_frame_ptr->frame_avg_wavelet_energy) + 1.0);
  }

  // Set the frame content type flag: a high intra-skip percentage is
  // classified as graphics / animation style content.
  if (this_frame_ptr->intra_skip_pct >= FC_ANIMATION_THRESH)
    twopass_frame->fr_content_type = FC_GRAPHICS_ANIMATION;
  else
    twopass_frame->fr_content_type = FC_NORMAL;
}
3262
// Copies the next first pass stats entry into *this_frame, advances the
// stats read pointer, and updates the per-frame two pass parameters.
// On the very first frame (non AOM_Q modes) it additionally seeds the
// rate control state with a section-level worst quality estimate.
static void process_first_pass_stats(AV1_COMP *cpi,
                                     FIRSTPASS_STATS *this_frame) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;

  if (cpi->oxcf.rc_cfg.mode != AOM_Q && current_frame->frame_number == 0 &&
      cpi->gf_frame_index == 0 && total_stats &&
      cpi->ppi->twopass.stats_buf_ctx->total_left_stats) {
    if (cpi->ppi->lap_enabled) {
      /*
       * Accumulate total_stats using available limited number of stats,
       * and assign it to total_left_stats.
       */
      *cpi->ppi->twopass.stats_buf_ctx->total_left_stats = *total_stats;
    }
    // Special case code for first frame.
    const int section_target_bandwidth = get_section_target_bandwidth(cpi);
    const double section_length =
        twopass->stats_buf_ctx->total_left_stats->count;
    // Per-frame averages over the remaining section.
    const double section_error =
        twopass->stats_buf_ctx->total_left_stats->coded_error / section_length;
    const double section_intra_skip =
        twopass->stats_buf_ctx->total_left_stats->intra_skip_pct /
        section_length;
    // Inactive zone rows counted twice (top and bottom edges), normalized
    // by the total number of MB rows over the section.
    const double section_inactive_zone =
        (twopass->stats_buf_ctx->total_left_stats->inactive_zone_rows * 2) /
        ((double)cm->mi_params.mb_rows * section_length);
    const int tmp_q = get_twopass_worst_quality(
        cpi, section_error, section_intra_skip + section_inactive_zone,
        section_target_bandwidth);

    // Seed rate control state with the estimated worst quality.
    rc->active_worst_quality = tmp_q;
    rc->ni_av_qi = tmp_q;
    p_rc->last_q[INTER_FRAME] = tmp_q;
    p_rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->seq_params->bit_depth);
    p_rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
    // Key frame quality starts halfway between the estimate and the best
    // allowed quality.
    p_rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.rc_cfg.best_allowed_q) / 2;
    p_rc->avg_frame_qindex[KEY_FRAME] = p_rc->last_q[KEY_FRAME];
  }

  // Copy the next stats entry and advance the read pointer, if any
  // stats remain.
  if (cpi->twopass_frame.stats_in <
      cpi->ppi->twopass.stats_buf_ctx->stats_in_end) {
    *this_frame = *cpi->twopass_frame.stats_in;
    ++cpi->twopass_frame.stats_in;
  }
  set_twopass_params_based_on_fp_stats(cpi, this_frame);
}
3314
// Records the bit allocation of the current GF group frame as the base
// frame target. In stats-less (one pass) encoding the target is also
// applied to the rate control state immediately.
static void setup_target_rate(AV1_COMP *cpi) {
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  RATE_CONTROL *const rc = &cpi->rc;
  const int target = gf_group->bit_allocation[cpi->gf_frame_index];

  if (has_no_stats_stage(cpi)) {
    av1_rc_set_frame_target(cpi, target, cpi->common.width,
                            cpi->common.height);
  }

  rc->base_frame_target = target;
}
3328
// Marks flash frames in the stats range [first_stats, last_stats):
// a frame is flagged when its successor is predicted noticeably better
// from the second (older) reference than from the immediately previous
// frame, which indicates a transient flash.
static void mark_flashes(FIRSTPASS_STATS *first_stats,
                         FIRSTPASS_STATS *last_stats) {
  for (FIRSTPASS_STATS *cur = first_stats; cur < last_stats - 1; ++cur) {
    const FIRSTPASS_STATS *const next = cur + 1;
    cur->is_flash = (next->pcnt_second_ref > next->pcnt_inter &&
                     next->pcnt_second_ref >= 0.5);
  }
  // The final frame has no successor to test; never treat it as a flash.
  if (last_stats - 1 >= first_stats) {
    (last_stats - 1)->is_flash = 0;
  }
}
3347
// Estimate the noise variance of each frame from the first pass stats.
// The estimate for a frame requires that it and its two predecessors are
// not flashes; frames without a valid estimate are later filled in from
// the nearest trustworthy neighbor.
static void estimate_noise(FIRSTPASS_STATS *first_stats,
                           FIRSTPASS_STATS *last_stats) {
  FIRSTPASS_STATS *this_stats, *next_stats;
  double C1, C2, C3, noise;
  int count = 0;
  // Pass 1: raw noise estimate from the intra/coded error correlations of
  // three consecutive frames.
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    this_stats->noise_var = 0.0;
    // flashes tend to have high correlation of innovations, so ignore them.
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;

    C1 = (this_stats - 1)->intra_error *
         (this_stats->intra_error - this_stats->coded_error);
    C2 = (this_stats - 2)->intra_error *
         ((this_stats - 1)->intra_error - (this_stats - 1)->coded_error);
    C3 = (this_stats - 2)->intra_error *
         (this_stats->intra_error - this_stats->sr_coded_error);
    // All three correlation terms must be positive for a usable estimate.
    if (C1 <= 0 || C2 <= 0 || C3 <= 0) continue;
    C1 = sqrt(C1);
    C2 = sqrt(C2);
    C3 = sqrt(C3);

    // Residual intra error not explained by the correlation chain,
    // floored at a small positive value.
    noise = (this_stats - 1)->intra_error - C1 * C2 / C3;
    noise = AOMMAX(noise, 0.01);
    this_stats->noise_var = noise;
    count++;
  }

  // Copy noise from the neighbor if the noise value is not trustworthy
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;
    if (this_stats->noise_var < 1.0) {
      int found = 0;
      // TODO(bohanli): consider expanding to two directions at the same time
      // Search forward first for a trustworthy (non-flash, noise >= 1.0)
      // neighbor ...
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      // ... then backward if none was found ahead.
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // copy the noise if this is a flash
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash) {
      int found = 0;
      // Forward search for the nearest non-flash frame ...
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      // ... falling back to a backward search.
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // if we are at the first 2 frames, copy the noise
  for (this_stats = first_stats;
       this_stats < first_stats + 2 && (first_stats + 2) < last_stats;
       this_stats++) {
    this_stats->noise_var = (first_stats + 2)->noise_var;
  }
}
3438
3439 // Estimate correlation coefficient of each frame with its previous frame.
estimate_coeff(FIRSTPASS_STATS * first_stats,FIRSTPASS_STATS * last_stats)3440 static void estimate_coeff(FIRSTPASS_STATS *first_stats,
3441 FIRSTPASS_STATS *last_stats) {
3442 FIRSTPASS_STATS *this_stats;
3443 for (this_stats = first_stats + 1; this_stats < last_stats; this_stats++) {
3444 const double C =
3445 sqrt(AOMMAX((this_stats - 1)->intra_error *
3446 (this_stats->intra_error - this_stats->coded_error),
3447 0.001));
3448 const double cor_coeff =
3449 C /
3450 AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var, 0.001);
3451
3452 this_stats->cor_coeff =
3453 cor_coeff *
3454 sqrt(AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var,
3455 0.001) /
3456 AOMMAX(this_stats->intra_error - this_stats->noise_var, 0.001));
3457 // clip correlation coefficient.
3458 this_stats->cor_coeff = AOMMIN(AOMMAX(this_stats->cor_coeff, 0), 1);
3459 }
3460 first_stats->cor_coeff = 1.0;
3461 }
3462
// Determines the encoding parameters for the upcoming frame in the
// second pass (or single pass with LAP): frame type, target rate and,
// at group boundaries, the next key frame interval and GF group
// structure. Consumes first pass stats as needed and leaves the stats
// read pointer positioned for the chosen frame.
void av1_get_second_pass_params(AV1_COMP *cpi,
                                EncodeFrameParams *const frame_params,
                                const EncodeFrameInput *const frame_input,
                                unsigned int frame_flags) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  // Remember the stats position on entry so it can be restored for ARF
  // frames and recorded for post-encode total stats updates.
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  int update_total_stats = 0;

  if (is_stat_consumption_stage(cpi) && !cpi->twopass_frame.stats_in) return;

  assert(cpi->twopass_frame.stats_in != NULL);
  const int update_type = gf_group->update_type[cpi->gf_frame_index];
  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];

  // Mid GF group: the group structure is already defined.
  if (cpi->gf_frame_index < gf_group->size && !(frame_flags & FRAMEFLAGS_KEY)) {
    assert(cpi->gf_frame_index < gf_group->size);

    setup_target_rate(cpi);

    // If this is an arf frame then we dont want to read the stats file or
    // advance the input pointer as we already have what we need.
    if (update_type == ARF_UPDATE || update_type == INTNL_ARF_UPDATE) {
      const FIRSTPASS_STATS *const this_frame_ptr =
          read_frame_stats(twopass, &cpi->twopass_frame,
                           gf_group->arf_src_offset[cpi->gf_frame_index]);
      set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
      return;
    }
  }

  if (oxcf->rc_cfg.mode == AOM_Q)
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  FIRSTPASS_STATS this_frame;
  av1_zero(this_frame);
  // Read and process the next frame's first pass stats.
  if (is_stat_consumption_stage(cpi)) {
    if (cpi->gf_frame_index < gf_group->size || rc->frames_to_key == 0) {
      process_first_pass_stats(cpi, &this_frame);
      update_total_stats = 1;
    }
  } else {
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  }

  // At a GF group boundary with LAP, optionally shorten the key frame
  // interval if a scenecut is detected within the look-ahead window.
  if (cpi->gf_frame_index == gf_group->size) {
    if (cpi->ppi->lap_enabled && cpi->ppi->p_rc.enable_scenecut_detection) {
      const int num_frames_to_detect_scenecut = MAX_GF_LENGTH_LAP + 1;
      const int frames_to_key = define_kf_interval(
          cpi, &twopass->firstpass_info, num_frames_to_detect_scenecut,
          /*search_start_idx=*/0);
      if (frames_to_key != -1)
        rc->frames_to_key = AOMMIN(rc->frames_to_key, frames_to_key);
    }
  }

  // Keyframe and section processing.
  // Keep a copy: find_next_key_frame() consumes this_frame, but the
  // original values are needed afterwards.
  FIRSTPASS_STATS this_frame_copy;
  this_frame_copy = this_frame;
  if (rc->frames_to_key <= 0) {
    assert(rc->frames_to_key == 0);
    // Define next KF group and assign bits to it.
    frame_params->frame_type = KEY_FRAME;
    find_next_key_frame(cpi, &this_frame);
    this_frame = this_frame_copy;
  }

  if (rc->frames_to_fwd_kf <= 0)
    rc->frames_to_fwd_kf = oxcf->kf_cfg.fwd_kf_dist;

  // Define a new GF/ARF group. (Should always enter here for key frames).
  if (cpi->gf_frame_index == gf_group->size) {
#if CONFIG_BITRATE_ACCURACY
    vbr_rc_reset_gop_data(&cpi->vbr_rc_info);
#endif  // CONFIG_BITRATE_ACCURACY
    // Limit the GOP length by the available look-ahead, reserving room
    // for temporal filtering of the ARF.
    int max_gop_length =
        (oxcf->gf_cfg.lag_in_frames >= 32)
            ? AOMMIN(MAX_GF_INTERVAL, oxcf->gf_cfg.lag_in_frames -
                                          oxcf->algo_cfg.arnr_max_frames / 2)
            : MAX_GF_LENGTH_LAP;

    // Use the provided gop size in low delay setting
    if (oxcf->gf_cfg.lag_in_frames == 0) max_gop_length = rc->max_gf_interval;

    // Identify regions if needed.
    // TODO(bohanli): identify regions for all stats available.
    if (rc->frames_since_key == 0 || rc->frames_since_key == 1 ||
        (p_rc->frames_till_regions_update - rc->frames_since_key <
             rc->frames_to_key &&
         p_rc->frames_till_regions_update - rc->frames_since_key <
             max_gop_length + 1)) {
      // how many frames we can analyze from this frame
      int rest_frames =
          AOMMIN(rc->frames_to_key, MAX_FIRSTPASS_ANALYSIS_FRAMES);
      rest_frames =
          AOMMIN(rest_frames, (int)(twopass->stats_buf_ctx->stats_in_end -
                                    cpi->twopass_frame.stats_in +
                                    (rc->frames_since_key == 0)));
      p_rc->frames_till_regions_update = rest_frames;

      if (cpi->ppi->lap_enabled) {
        // In LAP mode new stats keep arriving, so re-run the flash /
        // noise / correlation preprocessing over the current buffer.
        mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end);
        estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                       twopass->stats_buf_ctx->stats_in_end);
        estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                       twopass->stats_buf_ctx->stats_in_end);
        identify_regions(cpi->twopass_frame.stats_in, rest_frames,
                         (rc->frames_since_key == 0), p_rc->regions,
                         &p_rc->num_regions);
      } else {
        identify_regions(
            cpi->twopass_frame.stats_in - (rc->frames_since_key == 0),
            rest_frames, 0, p_rc->regions, &p_rc->num_regions);
      }
    }

    int cur_region_idx =
        find_regions_index(p_rc->regions, p_rc->num_regions,
                           rc->frames_since_key - p_rc->regions_offset);
    if ((cur_region_idx >= 0 &&
         p_rc->regions[cur_region_idx].type == SCENECUT_REGION) ||
        rc->frames_since_key == 0) {
      // If we start from a scenecut, then the last GOP's arf boost is not
      // needed for this GOP.
      cpi->ppi->gf_state.arf_gf_boost_lst = 0;
    }

    // In third pass mode the GF length may come from the second pass
    // output file instead of being recalculated here.
    int need_gf_len = 1;
    if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
      if (!cpi->third_pass_ctx->input_file_name && oxcf->two_pass_output) {
        cpi->third_pass_ctx->input_file_name = oxcf->two_pass_output;
      }
      if (cpi->third_pass_ctx->input_file_name) {
        int gf_len;
        const int order_hint_bits =
            cpi->common.seq_params->order_hint_info.order_hint_bits_minus_1 + 1;
        av1_set_gop_third_pass(cpi->third_pass_ctx, gf_group, order_hint_bits,
                               &gf_len);
        p_rc->cur_gf_index = 0;
        p_rc->gf_intervals[0] = gf_len;
        need_gf_len = 0;
      }
    }

    if (need_gf_len) {
      // TODO(jingning): Resolve the redundant calls here.
      if (rc->intervals_till_gf_calculate_due == 0 || 1) {
        calculate_gf_length(cpi, max_gop_length, MAX_NUM_GF_INTERVALS);
      }

      // For long GOPs, evaluate whether a shorter (16) GF interval would
      // be better using a trial TPL pass.
      if (max_gop_length > 16 && oxcf->algo_cfg.enable_tpl_model &&
          oxcf->gf_cfg.lag_in_frames >= 32 &&
          cpi->sf.tpl_sf.gop_length_decision_method != 3) {
        int this_idx = rc->frames_since_key +
                       p_rc->gf_intervals[p_rc->cur_gf_index] -
                       p_rc->regions_offset - 1;
        int this_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx);
        int next_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx + 1);
        // TODO(angiebird): Figure out why this_region and next_region are -1 in
        // unit test like AltRefFramePresenceTestLarge (aomedia:3134)
        int is_last_scenecut =
            p_rc->gf_intervals[p_rc->cur_gf_index] >= rc->frames_to_key ||
            (this_region != -1 &&
             p_rc->regions[this_region].type == SCENECUT_REGION) ||
            (next_region != -1 &&
             p_rc->regions[next_region].type == SCENECUT_REGION);

        int ori_gf_int = p_rc->gf_intervals[p_rc->cur_gf_index];

        if (p_rc->gf_intervals[p_rc->cur_gf_index] > 16 &&
            rc->min_gf_interval <= 16) {
          // The calculate_gf_length function is previously used with
          // max_gop_length = 32 with look-ahead gf intervals.
          define_gf_group(cpi, frame_params, 0);
          this_frame = this_frame_copy;

          if (is_shorter_gf_interval_better(cpi, frame_params, frame_input)) {
            // A shorter gf interval is better.
            // TODO(jingning): Remove redundant computations here.
            max_gop_length = 16;
            calculate_gf_length(cpi, max_gop_length, 1);
            // Keep the original interval if shortening would leave a tiny
            // group right before a scenecut / key frame.
            if (is_last_scenecut &&
                (ori_gf_int - p_rc->gf_intervals[p_rc->cur_gf_index] < 4)) {
              p_rc->gf_intervals[p_rc->cur_gf_index] = ori_gf_int;
            }
          }
        }
      }
    }

    define_gf_group(cpi, frame_params, 0);

    if (gf_group->update_type[cpi->gf_frame_index] != ARF_UPDATE &&
        rc->frames_since_key > 0)
      process_first_pass_stats(cpi, &this_frame);

    // Final pass over the group definition now that the leading frame's
    // stats have been consumed.
    define_gf_group(cpi, frame_params, 1);

    rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
    assert(cpi->gf_frame_index == 0);
#if ARF_STATS_OUTPUT
    {
      FILE *fpfile;
      fpfile = fopen("arf.stt", "a");
      ++arf_count;
      fprintf(fpfile, "%10d %10d %10d %10d %10d\n",
              cpi->common.current_frame.frame_number,
              rc->frames_till_gf_update_due, cpi->ppi->p_rc.kf_boost, arf_count,
              p_rc->gfu_boost);

      fclose(fpfile);
    }
#endif
  }
  assert(cpi->gf_frame_index < gf_group->size);

  if (gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
      gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE) {
    // ARF frames: rewind and read the stats for the ARF source frame.
    reset_fpf_position(&cpi->twopass_frame, start_pos);

    const FIRSTPASS_STATS *const this_frame_ptr =
        read_frame_stats(twopass, &cpi->twopass_frame,
                         gf_group->arf_src_offset[cpi->gf_frame_index]);
    set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
  } else {
    // Back up this frame's stats for updating total stats during post encode.
    cpi->twopass_frame.this_frame = update_total_stats ? start_pos : NULL;
  }

  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
  setup_target_rate(cpi);
}
3702
// Initializes the second pass context from the accumulated first pass
// stats: preprocesses the stats buffer, derives the clip frame rate and
// total bit budget, and computes the modified error totals used for bit
// allocation. No-op if no first pass stats are available.
void av1_init_second_pass(AV1_COMP *cpi) {
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  double frame_rate;
  FIRSTPASS_STATS *stats;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // Preprocess the stats: mark flash frames, then estimate per-frame
  // noise variance and inter-frame correlation.
  mark_flashes(twopass->stats_buf_ctx->stats_in_start,
               twopass->stats_buf_ctx->stats_in_end);
  estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                 twopass->stats_buf_ctx->stats_in_end);
  estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                 twopass->stats_buf_ctx->stats_in_end);

  stats = twopass->stats_buf_ctx->total_stats;

  // The entry at stats_in_end holds the accumulated clip-level totals.
  *stats = *twopass->stats_buf_ctx->stats_in_end;
  *twopass->stats_buf_ctx->total_left_stats = *stats;

  // Derive the average frame rate from the total duration of all first
  // pass frames (the 10000000 factor matches the duration scaling used
  // for the bits_left computation below).
  frame_rate = 10000000.0 * stats->count / stats->duration;
  // Each frame can have a different duration, as the frame rate in the source
  // isn't guaranteed to be constant. The frame rate prior to the first frame
  // encoded in the second pass is a guess. However, the sum duration is not.
  // It is calculated based on the actual durations of all frames from the
  // first pass.
  av1_new_framerate(cpi, frame_rate);
  // Total bit budget for the clip at the configured target bandwidth.
  twopass->bits_left =
      (int64_t)(stats->duration * oxcf->rc_cfg.target_bandwidth / 10000000.0);

#if CONFIG_BITRATE_ACCURACY
  vbr_rc_init(&cpi->vbr_rc_info, cpi->ppi->twopass.bits_left,
              (int)round(stats->count));
#endif

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  // Scan the first pass file and calculate a modified total error based upon
  // the bias/power function used to allocate bits.
  {
    const double avg_error =
        stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
    const FIRSTPASS_STATS *s = cpi->twopass_frame.stats_in;
    double modified_error_total = 0.0;
    // Per-frame modified error is clamped to a percentage band around
    // the clip average.
    twopass->modified_error_min =
        (avg_error * oxcf->rc_cfg.vbrmin_section) / 100;
    twopass->modified_error_max =
        (avg_error * oxcf->rc_cfg.vbrmax_section) / 100;
    while (s < twopass->stats_buf_ctx->stats_in_end) {
      modified_error_total +=
          calculate_modified_err(frame_info, twopass, oxcf, s);
      ++s;
    }
    twopass->modified_error_left = modified_error_total;
  }

  // Reset the vbr bits off target counters
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;

  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Initialize bits per macro_block estimate correction factor.
  twopass->bpm_factor = 1.0;
  // Initialize actual and target bits counters for ARF groups so that
  // at the start we have a neutral bpm adjustment.
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
3778
// Initializes the two pass context for single pass encoding with
// look-ahead processing (LAP). Mirrors av1_init_second_pass() but,
// with no clip-level first pass data available, the bit budget and
// modified error totals start at zero.
void av1_init_single_pass_lap(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  // No clip-wide budget or modified error totals in LAP mode.
  twopass->bits_left = 0;
  twopass->modified_error_min = 0.0;
  twopass->modified_error_max = 0.0;
  twopass->modified_error_left = 0.0;

  // Reset the vbr bits off target counters and the rate error estimate.
  p_rc->vbr_bits_off_target = 0;
  p_rc->vbr_bits_off_target_fast = 0;
  p_rc->rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Start with a neutral bits-per-macroblock correction factor and
  // neutral ARF group target/actual bit counters.
  twopass->bpm_factor = 1.0;
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}
3809
3810 #define MINQ_ADJ_LIMIT 48
3811 #define MINQ_ADJ_LIMIT_CQ 20
3812 #define HIGH_UNDERSHOOT_RATIO 2
av1_twopass_postencode_update(AV1_COMP * cpi)3813 void av1_twopass_postencode_update(AV1_COMP *cpi) {
3814 TWO_PASS *const twopass = &cpi->ppi->twopass;
3815 RATE_CONTROL *const rc = &cpi->rc;
3816 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
3817 const RateControlCfg *const rc_cfg = &cpi->oxcf.rc_cfg;
3818
3819 // Increment the stats_in pointer.
3820 if (is_stat_consumption_stage(cpi) &&
3821 (cpi->gf_frame_index < cpi->ppi->gf_group.size ||
3822 rc->frames_to_key == 0)) {
3823 const int update_type = cpi->ppi->gf_group.update_type[cpi->gf_frame_index];
3824 if (update_type != ARF_UPDATE && update_type != INTNL_ARF_UPDATE) {
3825 FIRSTPASS_STATS this_frame;
3826 --cpi->twopass_frame.stats_in;
3827 if (cpi->ppi->lap_enabled) {
3828 input_stats_lap(twopass, &cpi->twopass_frame, &this_frame);
3829 } else {
3830 input_stats(twopass, &cpi->twopass_frame, &this_frame);
3831 }
3832 } else if (cpi->ppi->lap_enabled) {
3833 cpi->twopass_frame.stats_in =
3834 cpi->ppi->twopass.stats_buf_ctx->stats_in_start;
3835 }
3836 }
3837
3838 // VBR correction is done through rc->vbr_bits_off_target. Based on the
3839 // sign of this value, a limited % adjustment is made to the target rate
3840 // of subsequent frames, to try and push it back towards 0. This method
3841 // is designed to prevent extreme behaviour at the end of a clip
3842 // or group of frames.
3843 p_rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
3844 twopass->bits_left = AOMMAX(twopass->bits_left - rc->base_frame_target, 0);
3845
3846 #if CONFIG_FRAME_PARALLEL_ENCODE
3847 if (cpi->do_update_vbr_bits_off_target_fast) {
3848 // Subtract current frame's fast_extra_bits.
3849 p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits;
3850 rc->frame_level_fast_extra_bits = 0;
3851 }
3852 #endif
3853
3854 // Target vs actual bits for this arf group.
3855 twopass->rolling_arf_group_target_bits += rc->base_frame_target;
3856 twopass->rolling_arf_group_actual_bits += rc->projected_frame_size;
3857
3858 // Calculate the pct rc error.
3859 if (p_rc->total_actual_bits) {
3860 p_rc->rate_error_estimate =
3861 (int)((p_rc->vbr_bits_off_target * 100) / p_rc->total_actual_bits);
3862 p_rc->rate_error_estimate = clamp(p_rc->rate_error_estimate, -100, 100);
3863 } else {
3864 p_rc->rate_error_estimate = 0;
3865 }
3866
3867 // Update the active best quality pyramid.
3868 if (!rc->is_src_frame_alt_ref) {
3869 const int pyramid_level =
3870 cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
3871 int i;
3872 for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) {
3873 p_rc->active_best_quality[i] = cpi->common.quant_params.base_qindex;
3874 #if CONFIG_TUNE_VMAF
3875 if (cpi->vmaf_info.original_qindex != -1 &&
3876 (cpi->oxcf.tune_cfg.tuning >= AOM_TUNE_VMAF_WITH_PREPROCESSING &&
3877 cpi->oxcf.tune_cfg.tuning <= AOM_TUNE_VMAF_NEG_MAX_GAIN)) {
3878 p_rc->active_best_quality[i] = cpi->vmaf_info.original_qindex;
3879 }
3880 #endif
3881 }
3882 }
3883
3884 #if 0
3885 {
3886 AV1_COMMON *cm = &cpi->common;
3887 FILE *fpfile;
3888 fpfile = fopen("details.stt", "a");
3889 fprintf(fpfile,
3890 "%10d %10d %10d %10" PRId64 " %10" PRId64
3891 " %10d %10d %10d %10.4lf %10.4lf %10.4lf %10.4lf\n",
3892 cm->current_frame.frame_number, rc->base_frame_target,
3893 rc->projected_frame_size, rc->total_actual_bits,
3894 rc->vbr_bits_off_target, p_rc->rate_error_estimate,
3895 twopass->rolling_arf_group_target_bits,
3896 twopass->rolling_arf_group_actual_bits,
3897 (double)twopass->rolling_arf_group_actual_bits /
3898 (double)twopass->rolling_arf_group_target_bits,
3899 twopass->bpm_factor,
3900 av1_convert_qindex_to_q(cpi->common.quant_params.base_qindex,
3901 cm->seq_params->bit_depth),
3902 av1_convert_qindex_to_q(rc->active_worst_quality,
3903 cm->seq_params->bit_depth));
3904 fclose(fpfile);
3905 }
3906 #endif
3907
3908 if (cpi->common.current_frame.frame_type != KEY_FRAME) {
3909 twopass->kf_group_bits -= rc->base_frame_target;
3910 twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
3911 }
3912 twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
3913
3914 // If the rate control is drifting consider adjustment to min or maxq.
3915 if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref) {
3916 int maxq_adj_limit;
3917 int minq_adj_limit;
3918 maxq_adj_limit = rc->worst_quality - rc->active_worst_quality;
3919 minq_adj_limit =
3920 (rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
3921 // Undershoot.
3922 if (p_rc->rate_error_estimate > rc_cfg->under_shoot_pct) {
3923 --twopass->extend_maxq;
3924 if (p_rc->rolling_target_bits >= p_rc->rolling_actual_bits)
3925 ++twopass->extend_minq;
3926 // Overshoot.
3927 } else if (p_rc->rate_error_estimate < -rc_cfg->over_shoot_pct) {
3928 --twopass->extend_minq;
3929 if (p_rc->rolling_target_bits < p_rc->rolling_actual_bits)
3930 ++twopass->extend_maxq;
3931 } else {
3932 // Adjustment for extreme local overshoot.
3933 if (rc->projected_frame_size > (2 * rc->base_frame_target) &&
3934 rc->projected_frame_size > (2 * rc->avg_frame_bandwidth))
3935 ++twopass->extend_maxq;
3936 // Unwind undershoot or overshoot adjustment.
3937 if (p_rc->rolling_target_bits < p_rc->rolling_actual_bits)
3938 --twopass->extend_minq;
3939 else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits)
3940 --twopass->extend_maxq;
3941 }
3942 twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit);
3943 twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);
3944
3945 #if CONFIG_FRAME_PARALLEL_ENCODE
3946 if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
3947 p_rc->vbr_bits_off_target_fast) {
3948 // Subtract current frame's fast_extra_bits.
3949 p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits;
3950 }
3951 #endif
3952
    // If there is a big and unexpected undershoot then feed the extra
    // bits back in quickly. One situation where this may happen is if a
    // frame is unexpectedly almost perfectly predicted by the ARF or GF
    // but not very well predicted by the previous frame.
3957 if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
3958 int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
3959 if (rc->projected_frame_size < fast_extra_thresh) {
3960 p_rc->vbr_bits_off_target_fast +=
3961 fast_extra_thresh - rc->projected_frame_size;
3962 p_rc->vbr_bits_off_target_fast = AOMMIN(p_rc->vbr_bits_off_target_fast,
3963 (4 * rc->avg_frame_bandwidth));
3964
3965 // Fast adaptation of minQ if necessary to use up the extra bits.
3966 if (rc->avg_frame_bandwidth) {
3967 twopass->extend_minq_fast = (int)(p_rc->vbr_bits_off_target_fast * 8 /
3968 rc->avg_frame_bandwidth);
3969 }
3970 twopass->extend_minq_fast = AOMMIN(
3971 twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
3972 } else if (p_rc->vbr_bits_off_target_fast) {
3973 twopass->extend_minq_fast = AOMMIN(
3974 twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
3975 } else {
3976 twopass->extend_minq_fast = 0;
3977 }
3978 }
3979 }
3980
3981 #if CONFIG_FRAME_PARALLEL_ENCODE
3982 // Update the frame probabilities obtained from parallel encode frames
3983 FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
3984 int i, j, loop;
  // Sequentially average temp_frame_probs_simulation, which holds the
  // probabilities of the last frame before the parallel encode
3987 for (loop = 0; loop <= cpi->num_frame_recode; loop++) {
3988 // Sequentially update tx_type_probs
3989 if (cpi->do_update_frame_probs_txtype[loop] &&
3990 (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)) {
3991 const FRAME_UPDATE_TYPE update_type =
3992 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
3993 for (i = 0; i < TX_SIZES_ALL; i++) {
3994 int left = 1024;
3995
3996 for (j = TX_TYPES - 1; j >= 0; j--) {
3997 const int new_prob =
3998 cpi->frame_new_probs[loop].tx_type_probs[update_type][i][j];
3999 int prob =
4000 (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
4001 left -= prob;
4002 if (j == 0) prob += left;
4003 frame_probs->tx_type_probs[update_type][i][j] = prob;
4004 }
4005 }
4006 }
4007
4008 // Sequentially update obmc_probs
4009 if (cpi->do_update_frame_probs_obmc[loop] &&
4010 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4011 const FRAME_UPDATE_TYPE update_type =
4012 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4013
4014 for (i = 0; i < BLOCK_SIZES_ALL; i++) {
4015 const int new_prob =
4016 cpi->frame_new_probs[loop].obmc_probs[update_type][i];
4017 frame_probs->obmc_probs[update_type][i] =
4018 (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
4019 }
4020 }
4021
4022 // Sequentially update warped_probs
4023 if (cpi->do_update_frame_probs_warp[loop] &&
4024 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4025 const FRAME_UPDATE_TYPE update_type =
4026 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4027 const int new_prob = cpi->frame_new_probs[loop].warped_probs[update_type];
4028 frame_probs->warped_probs[update_type] =
4029 (frame_probs->warped_probs[update_type] + new_prob) >> 1;
4030 }
4031
4032 // Sequentially update switchable_interp_probs
4033 if (cpi->do_update_frame_probs_interpfilter[loop] &&
4034 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4035 const FRAME_UPDATE_TYPE update_type =
4036 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4037
4038 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4039 int left = 1536;
4040
4041 for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
4042 const int new_prob = cpi->frame_new_probs[loop]
4043 .switchable_interp_probs[update_type][i][j];
4044 int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
4045 new_prob) >>
4046 1;
4047 left -= prob;
4048 if (j == 0) prob += left;
4049 frame_probs->switchable_interp_probs[update_type][i][j] = prob;
4050 }
4051 }
4052 }
4053 }
4054 #endif
4055 }
4056