1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_config.h"
13 #include "./vpx_scale_rtcd.h"
14 #include "./vpx_dsp_rtcd.h"
15 #include "./vp8_rtcd.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "quantize.h"
21 #include "vp8/common/alloccommon.h"
22 #include "mcomp.h"
23 #include "firstpass.h"
24 #include "vpx/internal/vpx_psnr.h"
25 #include "vpx_scale/vpx_scale.h"
26 #include "vp8/common/extend.h"
27 #include "ratectrl.h"
28 #include "vp8/common/quant_common.h"
29 #include "segmentation.h"
30 #if CONFIG_POSTPROC
31 #include "vp8/common/postproc.h"
32 #endif
33 #include "vpx_mem/vpx_mem.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/vpx_timer.h"
37 #if ARCH_ARM
38 #include "vpx_ports/arm.h"
39 #endif
40 #if CONFIG_MULTI_RES_ENCODING
41 #include "mr_dissim.h"
42 #endif
43 #include "encodeframe.h"
44
45 #include <math.h>
46 #include <stdio.h>
47 #include <limits.h>
48
49 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
50 extern int vp8_update_coef_context(VP8_COMP *cpi);
51 extern void vp8_update_coef_probs(VP8_COMP *cpi);
52 #endif
53
54 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
55 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
56 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
57
58 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
59 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
60 extern unsigned int vp8_get_processor_freq();
61 extern void print_tree_update_probs();
62 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
63 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
64
65 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
66
67 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
68
69 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
70
71 static void set_default_lf_deltas(VP8_COMP *cpi);
72
73 extern const int vp8_gf_interval_table[101];
74
75 #if CONFIG_INTERNAL_STATS
76 #include "math.h"
77
78 extern double vp8_calc_ssim
79 (
80 YV12_BUFFER_CONFIG *source,
81 YV12_BUFFER_CONFIG *dest,
82 int lumamask,
83 double *weight
84 );
85
86
87 extern double vp8_calc_ssimg
88 (
89 YV12_BUFFER_CONFIG *source,
90 YV12_BUFFER_CONFIG *dest,
91 double *ssim_y,
92 double *ssim_u,
93 double *ssim_v
94 );
95
96
97 #endif
98
99
100 #ifdef OUTPUT_YUV_SRC
101 FILE *yuv_file;
102 #endif
103 #ifdef OUTPUT_YUV_DENOISED
104 FILE *yuv_denoised_file;
105 #endif
106
107 #if 0
108 FILE *framepsnr;
109 FILE *kf_list;
110 FILE *keyfile;
111 #endif
112
113 #if 0
114 extern int skip_true_count;
115 extern int skip_false_count;
116 #endif
117
118
119 #ifdef VP8_ENTROPY_STATS
120 extern int intra_mode_stats[10][10][10];
121 #endif
122
123 #ifdef SPEEDSTATS
124 unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
125 unsigned int tot_pm = 0;
126 unsigned int cnt_pm = 0;
127 unsigned int tot_ef = 0;
128 unsigned int cnt_ef = 0;
129 #endif
130
131 #ifdef MODE_STATS
132 extern unsigned __int64 Sectionbits[50];
133 extern int y_modes[5] ;
134 extern int uv_modes[4] ;
135 extern int b_modes[10] ;
136
137 extern int inter_y_modes[10] ;
138 extern int inter_uv_modes[4] ;
139 extern unsigned int inter_b_modes[15];
140 #endif
141
142 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
143
144 extern const int qrounding_factors[129];
145 extern const int qzbin_factors[129];
146 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
147 extern const int vp8cx_base_skip_false_prob[128];
148
149 /* Tables relating active max Q to active min Q */
/* Minimum allowed ("active best") Q for key frames in low-motion content,
 * indexed by the active maximum Q.
 */
static const unsigned char kf_low_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
    3,3,3,3,3,3,4,4,4,5,5,5,5,5,6,6,
    6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
    11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
    16,16,17,17,18,18,18,18,19,20,20,21,21,22,23,23
};
/* As above, but for key frames in high-motion content (slightly higher
 * floors at the upper end of the range).
 */
static const unsigned char kf_high_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,
    3,3,3,3,4,4,4,4,5,5,5,5,5,5,6,6,
    6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
    11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
    16,16,17,17,18,18,18,18,19,19,20,20,20,20,21,21,
    21,21,22,22,23,23,24,25,25,26,26,27,28,28,29,30
};
/* Minimum allowed Q for golden/ARF frames, low-motion case. */
static const unsigned char gf_low_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
    3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,
    7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,
    11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
    19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
    27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
    35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,
    43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58
};
/* Minimum allowed Q for golden/ARF frames, medium-motion case. */
static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
    4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
    9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14,
    14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,
    22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,
    30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37,
    38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48,
    49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64
};
/* Minimum allowed Q for golden/ARF frames, high-motion case. */
static const unsigned char gf_high_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4,
    4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
    9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16,
    17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,
    25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32,
    33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40,
    41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54,
    55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80
};
/* Minimum allowed Q for ordinary inter frames. */
static const unsigned char inter_minq[QINDEX_RANGE] =
{
    0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9,
    9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20,
    20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31,
    32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43,
    44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56,
    57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70,
    71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85,
    86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
};
216
217 #ifdef PACKET_TESTING
218 extern FILE *vpxlogc;
219 #endif
220
/* Snapshot the rate-control and coding state of the currently active
 * temporal layer (cpi->current_layer) into its LAYER_CONTEXT slot, so a
 * different layer can be encoded and this state later re-installed by
 * restore_layer_context().  Mirrors that function field-for-field.
 */
static void save_layer_context(VP8_COMP *cpi)
{
    LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];

    /* Save layer dependent coding state */
    lc->target_bandwidth = cpi->target_bandwidth;
    lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
    lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
    lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
    lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
    lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
    lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
    lc->buffer_level = cpi->buffer_level;
    lc->bits_off_target = cpi->bits_off_target;
    lc->total_actual_bits = cpi->total_actual_bits;
    lc->worst_quality = cpi->worst_quality;
    lc->active_worst_quality = cpi->active_worst_quality;
    lc->best_quality = cpi->best_quality;
    lc->active_best_quality = cpi->active_best_quality;
    lc->ni_av_qi = cpi->ni_av_qi;
    lc->ni_tot_qi = cpi->ni_tot_qi;
    lc->ni_frames = cpi->ni_frames;
    lc->avg_frame_qindex = cpi->avg_frame_qindex;
    lc->rate_correction_factor = cpi->rate_correction_factor;
    lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
    lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
    lc->zbin_over_quant = cpi->mb.zbin_over_quant;
    lc->inter_frame_target = cpi->inter_frame_target;
    lc->total_byte_count = cpi->total_byte_count;
    lc->filter_level = cpi->common.filter_level;

    lc->last_frame_percent_intra = cpi->last_frame_percent_intra;

    /* Per-reference-frame usage counts live in the macroblock context. */
    memcpy (lc->count_mb_ref_frame_usage,
            cpi->mb.count_mb_ref_frame_usage,
            sizeof(cpi->mb.count_mb_ref_frame_usage));
}
258
/* Re-install the coding state previously saved for temporal layer |layer|
 * into the live encoder state, and make |layer| the current layer.
 * Inverse of save_layer_context(); note it intentionally does NOT write
 * cpi->worst_quality / cpi->best_quality back (only the "active" variants),
 * although save_layer_context() records both.
 */
static void restore_layer_context(VP8_COMP *cpi, const int layer)
{
    LAYER_CONTEXT *lc = &cpi->layer_context[layer];

    /* Restore layer dependent coding state */
    cpi->current_layer = layer;
    cpi->target_bandwidth = lc->target_bandwidth;
    cpi->oxcf.target_bandwidth = lc->target_bandwidth;
    cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
    cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
    cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
    cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
    cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
    cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
    cpi->buffer_level = lc->buffer_level;
    cpi->bits_off_target = lc->bits_off_target;
    cpi->total_actual_bits = lc->total_actual_bits;
    cpi->active_worst_quality = lc->active_worst_quality;
    cpi->active_best_quality = lc->active_best_quality;
    cpi->ni_av_qi = lc->ni_av_qi;
    cpi->ni_tot_qi = lc->ni_tot_qi;
    cpi->ni_frames = lc->ni_frames;
    cpi->avg_frame_qindex = lc->avg_frame_qindex;
    cpi->rate_correction_factor = lc->rate_correction_factor;
    cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
    cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
    cpi->mb.zbin_over_quant = lc->zbin_over_quant;
    cpi->inter_frame_target = lc->inter_frame_target;
    cpi->total_byte_count = lc->total_byte_count;
    cpi->common.filter_level = lc->filter_level;

    cpi->last_frame_percent_intra = lc->last_frame_percent_intra;

    /* Per-reference-frame usage counts live in the macroblock context. */
    memcpy (cpi->mb.count_mb_ref_frame_usage,
            lc->count_mb_ref_frame_usage,
            sizeof(cpi->mb.count_mb_ref_frame_usage));
}
296
/* Scale |val| by the ratio num/denom, performing the multiply in 64 bits
 * so the intermediate product cannot overflow a 32-bit int.  Truncates
 * toward zero; the quotient is assumed to fit back into an int.
 */
static int rescale(int val, int num, int denom)
{
    const int64_t product = (int64_t)val * (int64_t)num;

    return (int)(product / (int64_t)denom);
}
305
/* Initialise the LAYER_CONTEXT for temporal layer |layer| from the encoder
 * configuration: per-layer framerate and bandwidth, buffer levels (scaled
 * from milliseconds to bits of the layer's bandwidth), Q limits and
 * rate-correction factors.
 *
 * prev_layer_framerate is the framerate of layer-1 (0 for the base layer)
 * and is used to derive the average frame size of the frames unique to
 * this layer.
 */
static void init_temporal_layer_context(VP8_COMP *cpi,
                                        VP8_CONFIG *oxcf,
                                        const int layer,
                                        double prev_layer_framerate)
{
    LAYER_CONTEXT *lc = &cpi->layer_context[layer];

    lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
    lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;

    /* The oxcf buffer levels are expressed in milliseconds. */
    lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
    lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
    lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;

    /* Convert ms levels to bits at this layer's bandwidth. */
    lc->starting_buffer_level =
        rescale((int)(oxcf->starting_buffer_level),
                lc->target_bandwidth, 1000);

    /* 0 means "unset": default to 1/8 second worth of bits. */
    if (oxcf->optimal_buffer_level == 0)
        lc->optimal_buffer_level = lc->target_bandwidth / 8;
    else
        lc->optimal_buffer_level =
            rescale((int)(oxcf->optimal_buffer_level),
                    lc->target_bandwidth, 1000);

    if (oxcf->maximum_buffer_size == 0)
        lc->maximum_buffer_size = lc->target_bandwidth / 8;
    else
        lc->maximum_buffer_size =
            rescale((int)(oxcf->maximum_buffer_size),
                    lc->target_bandwidth, 1000);

    /* Work out the average size of a frame within this layer */
    /* NOTE(review): assumes lc->framerate != prev_layer_framerate for
     * layer > 0, i.e. each enhancement layer raises the framerate —
     * otherwise this divides by zero.  Verify against configuration
     * validation in the caller.
     */
    if (layer > 0)
        lc->avg_frame_size_for_layer =
            (int)((cpi->oxcf.target_bitrate[layer] -
                   cpi->oxcf.target_bitrate[layer-1]) * 1000 /
                  (lc->framerate - prev_layer_framerate));

    lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
    lc->active_best_quality = cpi->oxcf.best_allowed_q;
    lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;

    /* Start with a full (per-configuration) buffer. */
    lc->buffer_level = lc->starting_buffer_level;
    lc->bits_off_target = lc->starting_buffer_level;

    lc->total_actual_bits = 0;
    lc->ni_av_qi = 0;
    lc->ni_tot_qi = 0;
    lc->ni_frames = 0;
    lc->rate_correction_factor = 1.0;
    lc->key_frame_rate_correction_factor = 1.0;
    lc->gf_rate_correction_factor = 1.0;
    lc->inter_frame_target = 0;
}
361
362 // Upon a run-time change in temporal layers, reset the layer context parameters
363 // for any "new" layers. For "existing" layers, let them inherit the parameters
364 // from the previous layer state (at the same layer #). In future we may want
365 // to better map the previous layer state(s) to the "new" ones.
/* Handle a run-time change in the number of temporal layers: initialise
 * the context of any layer index that did not exist before, and reset all
 * layer buffer levels to their starting values.  Existing layers keep the
 * rest of their inherited state (see the comment above the function at
 * the call to init_temporal_layer_context below).
 */
static void reset_temporal_layer_change(VP8_COMP *cpi,
                                        VP8_CONFIG *oxcf,
                                        const int prev_num_layers)
{
    int i;
    double prev_layer_framerate = 0;
    const int curr_num_layers = cpi->oxcf.number_of_layers;
    // If the previous state was 1 layer, get current layer context from cpi.
    // We need this to set the layer context for the new layers below.
    if (prev_num_layers == 1)
    {
        cpi->current_layer = 0;
        save_layer_context(cpi);
    }
    for (i = 0; i < curr_num_layers; i++)
    {
        LAYER_CONTEXT *lc = &cpi->layer_context[i];
        if (i >= prev_num_layers)
        {
            init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
        }
        // The initial buffer levels are set based on their starting levels.
        // We could set the buffer levels based on the previous state (normalized
        // properly by the layer bandwidths) but we would need to keep track of
        // the previous set of layer bandwidths (i.e., target_bitrate[i])
        // before the layer change. For now, reset to the starting levels.
        lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
                           cpi->oxcf.target_bitrate[i];
        lc->bits_off_target = lc->buffer_level;
        // TODO(marpan): Should we set the rate_correction_factor and
        // active_worst/best_quality to values derived from the previous layer
        // state (to smooth-out quality dips/rate fluctuation at transition)?

        // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
        // is not set for 1 layer, and the restore_layer_context/save_context()
        // are not called in the encoding loop, so we need to call it here to
        // pass the layer context state to |cpi|.
        if (curr_num_layers == 1)
        {
            lc->target_bandwidth = cpi->oxcf.target_bandwidth;
            lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
                               lc->target_bandwidth / 1000;
            lc->bits_off_target = lc->buffer_level;
            restore_layer_context(cpi, 0);
        }
        // Remember this layer's framerate for the next layer's average
        // frame-size computation.
        prev_layer_framerate = cpi->output_framerate /
                               cpi->oxcf.rate_decimator[i];
    }
}
415
/* Set up per-frame features: raise (or clear) the segmentation update
 * flags depending on whether segmentation is enabled, reset all loop
 * filter deltas, then install the default deltas.
 */
static void setup_features(VP8_COMP *cpi)
{
    // If segmentation enabled set the update flags
    if ( cpi->mb.e_mbd.segmentation_enabled )
    {
        cpi->mb.e_mbd.update_mb_segmentation_map = 1;
        cpi->mb.e_mbd.update_mb_segmentation_data = 1;
    }
    else
    {
        cpi->mb.e_mbd.update_mb_segmentation_map = 0;
        cpi->mb.e_mbd.update_mb_segmentation_data = 0;
    }

    cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
    cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
    memset(cpi->mb.e_mbd.ref_lf_deltas, 0,
           sizeof(cpi->mb.e_mbd.ref_lf_deltas));
    memset(cpi->mb.e_mbd.mode_lf_deltas, 0,
           sizeof(cpi->mb.e_mbd.mode_lf_deltas));
    /* Size each clear by the array actually being cleared.  Previously the
     * two "last_*" memsets were sized by their non-last counterparts,
     * which is only correct while both declarations happen to match.
     */
    memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
           sizeof(cpi->mb.e_mbd.last_ref_lf_deltas));
    memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
           sizeof(cpi->mb.e_mbd.last_mode_lf_deltas));

    set_default_lf_deltas(cpi);

}
440
441
442 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
443
444
/* Free every buffer owned by the compressor instance and NULL the
 * pointers.  Safe to call on partially-allocated state since vpx_free
 * (like free) accepts NULL.
 */
static void dealloc_compressor_data(VP8_COMP *cpi)
{
    vpx_free(cpi->tplist);
    cpi->tplist = NULL;

    /* Delete last frame MV storage buffers */
    vpx_free(cpi->lfmv);
    cpi->lfmv = 0;

    vpx_free(cpi->lf_ref_frame_sign_bias);
    cpi->lf_ref_frame_sign_bias = 0;

    vpx_free(cpi->lf_ref_frame);
    cpi->lf_ref_frame = 0;

    /* Delete segmentation map */
    vpx_free(cpi->segmentation_map);
    cpi->segmentation_map = 0;

    vpx_free(cpi->active_map);
    cpi->active_map = 0;

    /* Shared frame buffers (reference frames etc.) */
    vp8_de_alloc_frame_buffers(&cpi->common);

    vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
    vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
    dealloc_raw_frame_buffers(cpi);

    /* Token buffer used by the bitstream packer */
    vpx_free(cpi->tok);
    cpi->tok = 0;

    /* Structure used to monitor GF usage */
    vpx_free(cpi->gf_active_flags);
    cpi->gf_active_flags = 0;

    /* Activity mask based per mb zbin adjustments */
    vpx_free(cpi->mb_activity_map);
    cpi->mb_activity_map = 0;

    vpx_free(cpi->mb.pip);
    cpi->mb.pip = 0;

#if CONFIG_MULTITHREAD
    /* Per-row progress counters used by the encoding worker threads */
    vpx_free(cpi->mt_current_mb_col);
    cpi->mt_current_mb_col = NULL;
#endif
}
492
/* Turn segmentation on and flag both the segment map and the segment
 * feature data for transmission with the next encoded frame.
 */
static void enable_segmentation(VP8_COMP *cpi)
{
    /* Set the appropriate feature bit */
    cpi->mb.e_mbd.segmentation_enabled = 1;
    cpi->mb.e_mbd.update_mb_segmentation_map = 1;
    cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
/* Turn segmentation off; the update flags are left untouched. */
static void disable_segmentation(VP8_COMP *cpi)
{
    /* Clear the appropriate feature bit */
    cpi->mb.e_mbd.segmentation_enabled = 0;
}
505
506 /* Valid values for a segment are 0 to 3
507 * Segmentation map is arrange as [Rows][Columns]
508 */
/* Install a new segmentation map (one byte per macroblock, laid out as
 * [rows][columns], segment ids 0..3) and flag both the map and its data
 * for transmission with the next encoded frame.
 */
static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
{
    const int map_size = cpi->common.mb_rows * cpi->common.mb_cols;

    memcpy(cpi->segmentation_map, segmentation_map, map_size);

    /* Signal that the map should be updated. */
    cpi->mb.e_mbd.update_mb_segmentation_map = 1;
    cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
518
519 /* The values given for each segment can be either deltas (from the default
520 * value chosen for the frame) or absolute values.
521 *
522 * Valid range for abs values is:
523 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
524 * Valid range for delta values are:
525 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
526 *
527 * abs_delta = SEGMENT_DELTADATA (deltas)
528 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
529 *
530 */
/* Install per-segment feature data (quantizer / loop-filter adjustments)
 * and record whether the values are deltas (SEGMENT_DELTADATA) or
 * absolute (SEGMENT_ABSDATA) — see the comment block above for the valid
 * value ranges.  |feature_data| must point at an array at least
 * sizeof(cpi->segment_feature_data) bytes long.
 */
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
{
    cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
    memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
}
536
537
/* Developer test hook: exercises the segmentation plumbing by installing
 * an all-zero segment map, enabling segmentation, and setting a small
 * quantizer delta on segment 1.  Not part of normal encoding.
 */
static void segmentation_test_function(VP8_COMP *cpi)
{
    unsigned char *seg_map;
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];

    // Create a temporary map for segmentation data.
    CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));

    // Set the segmentation Map
    set_segmentation_map(cpi, seg_map);

    // Activate segmentation.
    enable_segmentation(cpi);

    // Set up the quant segment data
    feature_data[MB_LVL_ALT_Q][0] = 0;
    feature_data[MB_LVL_ALT_Q][1] = 4;
    feature_data[MB_LVL_ALT_Q][2] = 0;
    feature_data[MB_LVL_ALT_Q][3] = 0;
    // Set up the loop segment data
    feature_data[MB_LVL_ALT_LF][0] = 0;
    feature_data[MB_LVL_ALT_LF][1] = 0;
    feature_data[MB_LVL_ALT_LF][2] = 0;
    feature_data[MB_LVL_ALT_LF][3] = 0;

    // Initialise the feature data structure
    // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
    set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

    // Delete segmentation map (set_segmentation_map copied it)
    vpx_free(seg_map);

    seg_map = 0;
}
572
573 /* A simple function to cyclically refresh the background at a lower Q */
/* A simple function to cyclically refresh the background at a lower Q.
 *
 * Walks the cyclic_refresh_map starting at cyclic_refresh_mode_index and
 * marks up to cyclic_refresh_mode_max_mbs_perframe candidate macroblocks
 * as segment 1 (boosted at cyclic_refresh_q, typically Q/2, with loop
 * filter adjusted by |lf_adjustment|).  The walk wraps around the frame
 * and the stopping index is saved so the next frame continues where this
 * one left off.
 */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
{
    unsigned char *seg_map = cpi->segmentation_map;
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
    int i;
    int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
    int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;

    /* Refreshed blocks get roughly half the frame Q. */
    cpi->cyclic_refresh_q = Q / 2;

    if (cpi->oxcf.screen_content_mode) {
      // Modify quality ramp-up based on Q. Above some Q level, increase the
      // number of blocks to be refreshed, and reduce it below the threshold.
      // Turn-off under certain conditions (i.e., away from key frame, and if
      // we are at good quality (low Q) and most of the blocks were skipped-encoded
      // in previous frame.
      int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
      if (Q >= qp_thresh) {
        cpi->cyclic_refresh_mode_max_mbs_perframe =
            (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
      } else if (cpi->frames_since_key > 250 &&
                 Q < 20 &&
                 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
        cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
      } else {
        cpi->cyclic_refresh_mode_max_mbs_perframe =
            (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
      }
      block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
    }

    // Set every macroblock to be eligible for update.
    // For key frame this will reset seg map to 0.
    memset(cpi->segmentation_map, 0, mbs_in_frame);

    if (cpi->common.frame_type != KEY_FRAME && block_count > 0)
    {
        /* Cycle through the macro_block rows */
        /* MB loop to set local segmentation map */
        i = cpi->cyclic_refresh_mode_index;
        assert(i < mbs_in_frame);
        do
        {
            /* If the MB is as a candidate for clean up then mark it for
             * possible boost/refresh (segment 1) The segment id may get
             * reset to 0 later if the MB gets coded anything other than
             * last frame 0,0 as only (last frame 0,0) MBs are eligible for
             * refresh : that is to say Mbs likely to be background blocks.
             */
            if (cpi->cyclic_refresh_map[i] == 0)
            {
                seg_map[i] = 1;
                block_count --;
            }
            else if (cpi->cyclic_refresh_map[i] < 0)
                /* Negative entries count up toward 0 (eligibility). */
                cpi->cyclic_refresh_map[i]++;

            i++;
            if (i == mbs_in_frame)
                i = 0;

        }
        while(block_count && i != cpi->cyclic_refresh_mode_index);

        /* Resume from here on the next frame. */
        cpi->cyclic_refresh_mode_index = i;

#if CONFIG_TEMPORAL_DENOISING
        if (cpi->oxcf.noise_sensitivity > 0) {
          if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
              Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
              (cpi->frames_since_key >
               2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
            // Under aggressive denoising, use segmentation to turn off loop
            // filter below some qp thresh. The filter is reduced for all
            // blocks that have been encoded as ZEROMV LAST x frames in a row,
            // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
            // This is to avoid "dot" artifacts that can occur from repeated
            // loop filtering on noisy input source.
            cpi->cyclic_refresh_q = Q;
            // lf_adjustment = -MAX_LOOP_FILTER;
            lf_adjustment = -40;
            for (i = 0; i < mbs_in_frame; ++i) {
              seg_map[i] = (cpi->consec_zero_last[i] >
                            cpi->denoiser.denoise_pars.consec_zerolast) ? 1 : 0;
            }
          }
        }
#endif
    }

    /* Activate segmentation. */
    cpi->mb.e_mbd.update_mb_segmentation_map = 1;
    cpi->mb.e_mbd.update_mb_segmentation_data = 1;
    enable_segmentation(cpi);

    /* Set up the quant segment data: segment 1 is boosted by the
     * (negative) delta cyclic_refresh_q - Q.
     */
    feature_data[MB_LVL_ALT_Q][0] = 0;
    feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
    feature_data[MB_LVL_ALT_Q][2] = 0;
    feature_data[MB_LVL_ALT_Q][3] = 0;

    /* Set up the loop segment data */
    feature_data[MB_LVL_ALT_LF][0] = 0;
    feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
    feature_data[MB_LVL_ALT_LF][2] = 0;
    feature_data[MB_LVL_ALT_LF][3] = 0;

    /* Initialise the feature data structure */
    set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

}
685
/* Install the default loop-filter delta values for reference frames and
 * prediction modes, and flag them for transmission.
 */
static void set_default_lf_deltas(VP8_COMP *cpi)
{
    cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
    cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;

    memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
    memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));

    /* Test of ref frame deltas */
    cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
    cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
    cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
    cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;

    cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */

    /* Realtime mode filters the zero-mv mode much more aggressively. */
    if(cpi->oxcf.Mode == MODE_REALTIME)
        cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
    else
        cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */

    cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
    cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
}
710
711 /* Convenience macros for mapping speed and mode into a continuous
712 * range
713 */
714 #define GOOD(x) (x+1)
715 #define RT(x) (x+7)
716
/* Look up the value associated with |speed| in a table laid out as
 * alternating {value, threshold} pairs: the value in effect is the last
 * one whose following threshold exceeds |speed|.  Tables are terminated
 * by an INT_MAX threshold (see GOOD()/RT() for how speeds are encoded).
 */
static int speed_map(int speed, const int *map)
{
    int value = map[0];
    const int *thresh = map + 1;

    while (speed >= *thresh)
    {
        value = thresh[1];
        thresh += 2;
    }

    return value;
}
727
/* The tables below are consumed by speed_map(): alternating
 * {value, threshold} pairs, terminated by an INT_MAX threshold.
 * GOOD(x)/RT(x) map good-quality and realtime speeds onto one
 * continuous axis.
 */

/* RD threshold multipliers for zero/nearest/near modes. */
static const int thresh_mult_map_znn[] = {
    /* map common to zero, nearest, and near */
    0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
};

/* RD threshold multipliers for V_PRED / H_PRED intra modes. */
static const int thresh_mult_map_vhpred[] = {
    1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(1), 2000,
    RT(7), INT_MAX, INT_MAX
};

/* RD threshold multipliers for B_PRED (4x4 intra). */
static const int thresh_mult_map_bpred[] = {
    2000, GOOD(0), 2500, GOOD(2), 5000, GOOD(3), 7500, RT(0), 2500, RT(1), 5000,
    RT(6), INT_MAX, INT_MAX
};

/* RD threshold multipliers for TM_PRED. */
static const int thresh_mult_map_tm[] = {
    1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 0, RT(1), 1000, RT(2), 2000,
    RT(7), INT_MAX, INT_MAX
};

/* RD threshold multipliers for NEWMV, first reference. */
static const int thresh_mult_map_new1[] = {
    1000, GOOD(2), 2000, RT(0), 2000, INT_MAX
};

/* RD threshold multipliers for NEWMV, second/third reference. */
static const int thresh_mult_map_new2[] = {
    1000, GOOD(2), 2000, GOOD(3), 2500, GOOD(5), 4000, RT(0), 2000, RT(2), 2500,
    RT(5), 4000, INT_MAX
};

/* RD threshold multipliers for SPLITMV, first reference. */
static const int thresh_mult_map_split1[] = {
    2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
    RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
};

/* RD threshold multipliers for SPLITMV, second/third reference. */
static const int thresh_mult_map_split2[] = {
    5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
    RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
};

/* Mode-check frequency masks (values are check-every-2^k style counters). */
static const int mode_check_freq_map_zn2[] = {
    /* {zero,nearest}{2,3} */
    0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
};

static const int mode_check_freq_map_vhbpred[] = {
    0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
};

static const int mode_check_freq_map_near2[] = {
    0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(10), 1<<2, RT(11), 1<<3, RT(12), 1<<4,
    INT_MAX
};

static const int mode_check_freq_map_new1[] = {
    0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
};

static const int mode_check_freq_map_new2[] = {
    0, GOOD(5), 4, RT(0), 0, RT(3), 4, RT(10), 1<<3, RT(11), 1<<4, RT(12), 1<<5,
    INT_MAX
};

static const int mode_check_freq_map_split1[] = {
    0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
};

static const int mode_check_freq_map_split2[] = {
    0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
};
797
vp8_set_speed_features(VP8_COMP * cpi)798 void vp8_set_speed_features(VP8_COMP *cpi)
799 {
800 SPEED_FEATURES *sf = &cpi->sf;
801 int Mode = cpi->compressor_speed;
802 int Speed = cpi->Speed;
803 int i;
804 VP8_COMMON *cm = &cpi->common;
805 int last_improved_quant = sf->improved_quant;
806 int ref_frames;
807
808 /* Initialise default mode frequency sampling variables */
809 for (i = 0; i < MAX_MODES; i ++)
810 {
811 cpi->mode_check_freq[i] = 0;
812 }
813
814 cpi->mb.mbs_tested_so_far = 0;
815 cpi->mb.mbs_zero_last_dot_suppress = 0;
816
817 /* best quality defaults */
818 sf->RD = 1;
819 sf->search_method = NSTEP;
820 sf->improved_quant = 1;
821 sf->improved_dct = 1;
822 sf->auto_filter = 1;
823 sf->recode_loop = 1;
824 sf->quarter_pixel_search = 1;
825 sf->half_pixel_search = 1;
826 sf->iterative_sub_pixel = 1;
827 sf->optimize_coefficients = 1;
828 sf->use_fastquant_for_pick = 0;
829 sf->no_skip_block4x4_search = 1;
830
831 sf->first_step = 0;
832 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
833 sf->improved_mv_pred = 1;
834
835 /* default thresholds to 0 */
836 for (i = 0; i < MAX_MODES; i++)
837 sf->thresh_mult[i] = 0;
838
839 /* Count enabled references */
840 ref_frames = 1;
841 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
842 ref_frames++;
843 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
844 ref_frames++;
845 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
846 ref_frames++;
847
848 /* Convert speed to continuous range, with clamping */
849 if (Mode == 0)
850 Speed = 0;
851 else if (Mode == 2)
852 Speed = RT(Speed);
853 else
854 {
855 if (Speed > 5)
856 Speed = 5;
857 Speed = GOOD(Speed);
858 }
859
860 sf->thresh_mult[THR_ZERO1] =
861 sf->thresh_mult[THR_NEAREST1] =
862 sf->thresh_mult[THR_NEAR1] =
863 sf->thresh_mult[THR_DC] = 0; /* always */
864
865 sf->thresh_mult[THR_ZERO2] =
866 sf->thresh_mult[THR_ZERO3] =
867 sf->thresh_mult[THR_NEAREST2] =
868 sf->thresh_mult[THR_NEAREST3] =
869 sf->thresh_mult[THR_NEAR2] =
870 sf->thresh_mult[THR_NEAR3] = speed_map(Speed, thresh_mult_map_znn);
871
872 sf->thresh_mult[THR_V_PRED] =
873 sf->thresh_mult[THR_H_PRED] = speed_map(Speed, thresh_mult_map_vhpred);
874 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
875 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
876 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
877 sf->thresh_mult[THR_NEW2] =
878 sf->thresh_mult[THR_NEW3] = speed_map(Speed, thresh_mult_map_new2);
879 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
880 sf->thresh_mult[THR_SPLIT2] =
881 sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2);
882
883 // Special case for temporal layers.
884 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
885 // used as second reference. We don't modify thresholds for ALTREF case
886 // since ALTREF is usually used as long-term reference in temporal layers.
887 if ((cpi->Speed <= 6) &&
888 (cpi->oxcf.number_of_layers > 1) &&
889 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
890 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
891 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
892 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
893 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
894 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
895 } else {
896 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
897 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
898 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
899 }
900 }
901
902 cpi->mode_check_freq[THR_ZERO1] =
903 cpi->mode_check_freq[THR_NEAREST1] =
904 cpi->mode_check_freq[THR_NEAR1] =
905 cpi->mode_check_freq[THR_TM] =
906 cpi->mode_check_freq[THR_DC] = 0; /* always */
907
908 cpi->mode_check_freq[THR_ZERO2] =
909 cpi->mode_check_freq[THR_ZERO3] =
910 cpi->mode_check_freq[THR_NEAREST2] =
911 cpi->mode_check_freq[THR_NEAREST3] = speed_map(Speed,
912 mode_check_freq_map_zn2);
913
914 cpi->mode_check_freq[THR_NEAR2] =
915 cpi->mode_check_freq[THR_NEAR3] = speed_map(Speed,
916 mode_check_freq_map_near2);
917
918 cpi->mode_check_freq[THR_V_PRED] =
919 cpi->mode_check_freq[THR_H_PRED] =
920 cpi->mode_check_freq[THR_B_PRED] = speed_map(Speed,
921 mode_check_freq_map_vhbpred);
922 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed,
923 mode_check_freq_map_new1);
924 cpi->mode_check_freq[THR_NEW2] =
925 cpi->mode_check_freq[THR_NEW3] = speed_map(Speed,
926 mode_check_freq_map_new2);
927 cpi->mode_check_freq[THR_SPLIT1] = speed_map(Speed,
928 mode_check_freq_map_split1);
929 cpi->mode_check_freq[THR_SPLIT2] =
930 cpi->mode_check_freq[THR_SPLIT3] = speed_map(Speed,
931 mode_check_freq_map_split2);
932 Speed = cpi->Speed;
933 switch (Mode)
934 {
935 #if !(CONFIG_REALTIME_ONLY)
936 case 0: /* best quality mode */
937 sf->first_step = 0;
938 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
939 break;
940 case 1:
941 case 3:
942 if (Speed > 0)
943 {
944 /* Disable coefficient optimization above speed 0 */
945 sf->optimize_coefficients = 0;
946 sf->use_fastquant_for_pick = 1;
947 sf->no_skip_block4x4_search = 0;
948
949 sf->first_step = 1;
950 }
951
952 if (Speed > 2)
953 {
954 sf->improved_quant = 0;
955 sf->improved_dct = 0;
956
957 /* Only do recode loop on key frames, golden frames and
958 * alt ref frames
959 */
960 sf->recode_loop = 2;
961
962 }
963
964 if (Speed > 3)
965 {
966 sf->auto_filter = 1;
967 sf->recode_loop = 0; /* recode loop off */
968 sf->RD = 0; /* Turn rd off */
969
970 }
971
972 if (Speed > 4)
973 {
974 sf->auto_filter = 0; /* Faster selection of loop filter */
975 }
976
977 break;
978 #endif
979 case 2:
980 sf->optimize_coefficients = 0;
981 sf->recode_loop = 0;
982 sf->auto_filter = 1;
983 sf->iterative_sub_pixel = 1;
984 sf->search_method = NSTEP;
985
986 if (Speed > 0)
987 {
988 sf->improved_quant = 0;
989 sf->improved_dct = 0;
990
991 sf->use_fastquant_for_pick = 1;
992 sf->no_skip_block4x4_search = 0;
993 sf->first_step = 1;
994 }
995
996 if (Speed > 2)
997 sf->auto_filter = 0; /* Faster selection of loop filter */
998
999 if (Speed > 3)
1000 {
1001 sf->RD = 0;
1002 sf->auto_filter = 1;
1003 }
1004
1005 if (Speed > 4)
1006 {
1007 sf->auto_filter = 0; /* Faster selection of loop filter */
1008 sf->search_method = HEX;
1009 sf->iterative_sub_pixel = 0;
1010 }
1011
1012 if (Speed > 6)
1013 {
1014 unsigned int sum = 0;
1015 unsigned int total_mbs = cm->MBs;
1016 int thresh;
1017 unsigned int total_skip;
1018
1019 int min = 2000;
1020
1021 if (cpi->oxcf.encode_breakout > 2000)
1022 min = cpi->oxcf.encode_breakout;
1023
1024 min >>= 7;
1025
1026 for (i = 0; i < min; i++)
1027 {
1028 sum += cpi->mb.error_bins[i];
1029 }
1030
1031 total_skip = sum;
1032 sum = 0;
1033
1034 /* i starts from 2 to make sure thresh started from 2048 */
1035 for (; i < 1024; i++)
1036 {
1037 sum += cpi->mb.error_bins[i];
1038
1039 if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
1040 break;
1041 }
1042
1043 i--;
1044 thresh = (i << 7);
1045
1046 if (thresh < 2000)
1047 thresh = 2000;
1048
1049 if (ref_frames > 1)
1050 {
1051 sf->thresh_mult[THR_NEW1 ] = thresh;
1052 sf->thresh_mult[THR_NEAREST1 ] = thresh >> 1;
1053 sf->thresh_mult[THR_NEAR1 ] = thresh >> 1;
1054 }
1055
1056 if (ref_frames > 2)
1057 {
1058 sf->thresh_mult[THR_NEW2] = thresh << 1;
1059 sf->thresh_mult[THR_NEAREST2 ] = thresh;
1060 sf->thresh_mult[THR_NEAR2 ] = thresh;
1061 }
1062
1063 if (ref_frames > 3)
1064 {
1065 sf->thresh_mult[THR_NEW3] = thresh << 1;
1066 sf->thresh_mult[THR_NEAREST3 ] = thresh;
1067 sf->thresh_mult[THR_NEAR3 ] = thresh;
1068 }
1069
1070 sf->improved_mv_pred = 0;
1071 }
1072
1073 if (Speed > 8)
1074 sf->quarter_pixel_search = 0;
1075
1076 if(cm->version == 0)
1077 {
1078 cm->filter_type = NORMAL_LOOPFILTER;
1079
1080 if (Speed >= 14)
1081 cm->filter_type = SIMPLE_LOOPFILTER;
1082 }
1083 else
1084 {
1085 cm->filter_type = SIMPLE_LOOPFILTER;
1086 }
1087
1088 /* This has a big hit on quality. Last resort */
1089 if (Speed >= 15)
1090 sf->half_pixel_search = 0;
1091
1092 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1093
1094 }; /* switch */
1095
1096 /* Slow quant, dct and trellis not worthwhile for first pass
1097 * so make sure they are always turned off.
1098 */
1099 if ( cpi->pass == 1 )
1100 {
1101 sf->improved_quant = 0;
1102 sf->optimize_coefficients = 0;
1103 sf->improved_dct = 0;
1104 }
1105
1106 if (cpi->sf.search_method == NSTEP)
1107 {
1108 vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1109 }
1110 else if (cpi->sf.search_method == DIAMOND)
1111 {
1112 vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1113 }
1114
1115 if (cpi->sf.improved_dct)
1116 {
1117 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1118 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1119 }
1120 else
1121 {
1122 /* No fast FDCT defined for any platform at this time. */
1123 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1124 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1125 }
1126
1127 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1128
1129 if (cpi->sf.improved_quant)
1130 {
1131 cpi->mb.quantize_b = vp8_regular_quantize_b;
1132 }
1133 else
1134 {
1135 cpi->mb.quantize_b = vp8_fast_quantize_b;
1136 }
1137 if (cpi->sf.improved_quant != last_improved_quant)
1138 vp8cx_init_quantizer(cpi);
1139
1140 if (cpi->sf.iterative_sub_pixel == 1)
1141 {
1142 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1143 }
1144 else if (cpi->sf.quarter_pixel_search)
1145 {
1146 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1147 }
1148 else if (cpi->sf.half_pixel_search)
1149 {
1150 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1151 }
1152 else
1153 {
1154 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1155 }
1156
1157 if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
1158 cpi->mb.optimize = 1;
1159 else
1160 cpi->mb.optimize = 0;
1161
1162 if (cpi->common.full_pixel)
1163 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1164
1165 #ifdef SPEEDSTATS
1166 frames_at_speed[cpi->Speed]++;
1167 #endif
1168 }
1169 #undef GOOD
1170 #undef RT
1171
/* Allocate the raw-source side buffers: the lookahead queue used for
 * lagged encoding and, when temporal alt-ref filtering is compiled in,
 * the buffer that holds the filtered alt-ref frame.  On allocation
 * failure this raises VPX_CODEC_MEM_ERROR via vpx_internal_error()
 * (longjmps to the encoder's error handler); it does not return an
 * error code.
 */
static void alloc_raw_frame_buffers(VP8_COMP *cpi)
{
#if VP8_TEMPORAL_ALT_REF
    /* Round dimensions up to a whole number of 16-pixel macroblocks. */
    int width = (cpi->oxcf.Width + 15) & ~15;
    int height = (cpi->oxcf.Height + 15) & ~15;
#endif

    cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
                                        cpi->oxcf.lag_in_frames);
    if(!cpi->lookahead)
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate lag buffers");

#if VP8_TEMPORAL_ALT_REF

    if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
                                    width, height, VP8BORDERINPIXELS))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate altref buffer");

#endif
}
1194
1195
/* Release the buffers created by alloc_raw_frame_buffers(). */
static void dealloc_raw_frame_buffers(VP8_COMP *cpi)
{
#if VP8_TEMPORAL_ALT_REF
    vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
#endif
    vp8_lookahead_destroy(cpi->lookahead);
}
1203
1204
/* (Re)allocate the per-macroblock partition-info array, sized with a
 * one-entry border row and column.  Returns 0 on success, 1 if the
 * allocation fails (any previous allocation is freed either way).
 */
static int vp8_alloc_partition_data(VP8_COMP *cpi)
{
    VP8_COMMON *const cm = &cpi->common;
    const int num_entries = (cm->mb_cols + 1) * (cm->mb_rows + 1);

    /* Drop any previous allocation before creating a new one. */
    vpx_free(cpi->mb.pip);
    cpi->mb.pip = vpx_calloc(num_entries, sizeof(PARTITION_INFO));

    if (cpi->mb.pip == NULL)
        return 1;

    /* Point pi past the border so pi[0] is the first real macroblock. */
    cpi->mb.pi = cpi->mb.pip + cm->mode_info_stride + 1;
    return 0;
}
1219
/* Allocate (or re-allocate) all per-frame working storage for the
 * encoder: common frame buffers, partition info, the token buffer,
 * GF usage maps, MV-prediction arrays, segmentation/active maps and
 * (optionally) denoiser state.  Previous allocations are freed before
 * each new one, so this is safe to call again after a resolution
 * change.  Failures raise VPX_CODEC_MEM_ERROR through
 * vpx_internal_error() / CHECK_MEM_ERROR.
 */
void vp8_alloc_compressor_data(VP8_COMP *cpi)
{
    VP8_COMMON *cm = & cpi->common;

    int width = cm->Width;
    int height = cm->Height;

    if (vp8_alloc_frame_buffers(cm, width, height))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");

    if (vp8_alloc_partition_data(cpi))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate partition data");


    /* Round dimensions up to a whole number of 16-pixel macroblocks
     * for the encoder-local scratch frames below. */
    if ((width & 0xf) != 0)
        width += 16 - (width & 0xf);

    if ((height & 0xf) != 0)
        height += 16 - (height & 0xf);


    if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame,
                                    width, height, VP8BORDERINPIXELS))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate last frame buffer");

    if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
                                    width, height, VP8BORDERINPIXELS))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate scaled source buffer");

    vpx_free(cpi->tok);

    {
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
#else
        /* Worst case: 24 4x4 blocks per MB, 16 tokens per block. */
        unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
#endif
        CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
    }

    /* Data used for real time vc mode to see if gf needs refreshing */
    cpi->zeromv_count = 0;


    /* Structures used to monitor GF usage */
    vpx_free(cpi->gf_active_flags);
    CHECK_MEM_ERROR(cpi->gf_active_flags,
                    vpx_calloc(sizeof(*cpi->gf_active_flags),
                    cm->mb_rows * cm->mb_cols));
    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

    vpx_free(cpi->mb_activity_map);
    CHECK_MEM_ERROR(cpi->mb_activity_map,
                    vpx_calloc(sizeof(*cpi->mb_activity_map),
                    cm->mb_rows * cm->mb_cols));

    /* allocate memory for storing last frame's MVs for MV prediction. */
    /* The +2 in each dimension adds a one-entry border on every side. */
    vpx_free(cpi->lfmv);
    CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
                    sizeof(*cpi->lfmv)));
    vpx_free(cpi->lf_ref_frame_sign_bias);
    CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
                    vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
                    sizeof(*cpi->lf_ref_frame_sign_bias)));
    vpx_free(cpi->lf_ref_frame);
    CHECK_MEM_ERROR(cpi->lf_ref_frame,
                    vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
                    sizeof(*cpi->lf_ref_frame)));

    /* Create the encoder segmentation map and set all entries to 0 */
    vpx_free(cpi->segmentation_map);
    CHECK_MEM_ERROR(cpi->segmentation_map,
                    vpx_calloc(cm->mb_rows * cm->mb_cols,
                    sizeof(*cpi->segmentation_map)));
    cpi->cyclic_refresh_mode_index = 0;
    vpx_free(cpi->active_map);
    CHECK_MEM_ERROR(cpi->active_map,
                    vpx_calloc(cm->mb_rows * cm->mb_cols,
                    sizeof(*cpi->active_map)));
    /* All macroblocks start out active (1 == encode this MB). */
    memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));

#if CONFIG_MULTITHREAD
    /* Row-sync granularity for the multithreaded encoder scales with
     * frame width. */
    if (width < 640)
        cpi->mt_sync_range = 1;
    else if (width <= 1280)
        cpi->mt_sync_range = 4;
    else if (width <= 2560)
        cpi->mt_sync_range = 8;
    else
        cpi->mt_sync_range = 16;

    if (cpi->oxcf.multi_threaded > 1)
    {
        vpx_free(cpi->mt_current_mb_col);
        CHECK_MEM_ERROR(cpi->mt_current_mb_col,
                   vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
    }

#endif

    vpx_free(cpi->tplist);
    CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));

#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0) {
      vp8_denoiser_free(&cpi->denoiser);
      vp8_denoiser_allocate(&cpi->denoiser, width, height,
                            cm->mb_rows, cm->mb_cols,
                            cpi->oxcf.noise_sensitivity);
    }
#endif
}
1336
1337
1338 /* Quant MOD */
1339 static const int q_trans[] =
1340 {
1341 0, 1, 2, 3, 4, 5, 7, 8,
1342 9, 10, 12, 13, 15, 17, 18, 19,
1343 20, 21, 23, 24, 25, 26, 27, 28,
1344 29, 30, 31, 33, 35, 37, 39, 41,
1345 43, 45, 47, 49, 51, 53, 55, 57,
1346 59, 61, 64, 67, 70, 73, 76, 79,
1347 82, 85, 88, 91, 94, 97, 100, 103,
1348 106, 109, 112, 115, 118, 121, 124, 127,
1349 };
1350
vp8_reverse_trans(int x)1351 int vp8_reverse_trans(int x)
1352 {
1353 int i;
1354
1355 for (i = 0; i < 64; i++)
1356 if (q_trans[i] >= x)
1357 return i;
1358
1359 return 63;
1360 }
/* Install a new output frame rate and refresh every rate-control
 * quantity derived from it: per-frame bandwidth targets, the minimum
 * frame bandwidth, and the maximum golden/alt-ref frame intervals
 * (clamped against the lag when lagged alt-ref encoding is active).
 */
void vp8_new_framerate(VP8_COMP *cpi, double framerate)
{
    /* Guard against nonsense rates derived from bad timestamps. */
    const double rate = (framerate < .1) ? 30 : framerate;

    cpi->framerate = rate;
    cpi->output_framerate = rate;
    cpi->per_frame_bandwidth =
        (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
    cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
    cpi->min_frame_bandwidth =
        (int)(cpi->av_per_frame_bandwidth *
              cpi->oxcf.two_pass_vbrmin_section / 100);

    /* Maximum gf/arf interval: roughly half a second of frames,
     * floored at 12. */
    cpi->max_gf_interval = (int)(cpi->output_framerate / 2.0) + 2;
    if (cpi->max_gf_interval < 12)
        cpi->max_gf_interval = 12;

    /* Extended interval allowed for genuinely static scenes. */
    cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;

    /* With an alt-ref in lagged compress mode neither interval may
     * exceed the available lag. */
    if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
    {
        const int lag_limit = cpi->oxcf.lag_in_frames - 1;

        if (cpi->max_gf_interval > lag_limit)
            cpi->max_gf_interval = lag_limit;

        if (cpi->twopass.static_scene_max_gf_interval > lag_limit)
            cpi->twopass.static_scene_max_gf_interval = lag_limit;
    }

    if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval)
        cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
}
1396
1397
/* One-time encoder initialisation from the user configuration: copies
 * the config, guesses an initial frame rate from the timebase, sets
 * default reference-frame flags, then delegates the shared (re)config
 * work to vp8_change_config().  Finally seeds rate-control state,
 * temporal-layer contexts and the temporal-filter divide table.
 */
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
    VP8_COMMON *cm = &cpi->common;

    cpi->oxcf = *oxcf;

    cpi->auto_gold = 1;
    cpi->auto_adjust_gold_quantizer = 1;

    cm->version = oxcf->Version;
    vp8_setup_version(cm);

    /* Frame rate is not available on the first frame, as it's derived from
     * the observed timestamps. The actual value used here doesn't matter
     * too much, as it will adapt quickly.
     */
    if (oxcf->timebase.num > 0) {
        cpi->framerate = (double)(oxcf->timebase.den) /
                         (double)(oxcf->timebase.num);
    } else {
        cpi->framerate = 30;
    }

    /* If the reciprocal of the timebase seems like a reasonable framerate,
     * then use that as a guess, otherwise use 30.
     */
    if (cpi->framerate > 180)
        cpi->framerate = 30;

    cpi->ref_framerate = cpi->framerate;

    /* All three reference frames are initially available. */
    cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;

    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->refresh_entropy_probs = 1;

    /* change includes all joint functionality */
    vp8_change_config(cpi, oxcf);

    /* Initialize active best and worst q and average q values. */
    cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->active_best_quality = cpi->oxcf.best_allowed_q;
    cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;

    /* Initialise the starting buffer levels */
    cpi->buffer_level = cpi->oxcf.starting_buffer_level;
    cpi->bits_off_target = cpi->oxcf.starting_buffer_level;

    cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;

    cpi->total_actual_bits = 0;
    cpi->total_target_vs_actual = 0;

    /* Temporal scalability: set up one context per layer, each layer's
     * frame rate derived from the previous layer's decimator. */
    if (cpi->oxcf.number_of_layers > 1)
    {
        unsigned int i;
        double prev_layer_framerate=0;

        for (i=0; i<cpi->oxcf.number_of_layers; i++)
        {
            init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
            prev_layer_framerate = cpi->output_framerate /
                                   cpi->oxcf.rate_decimator[i];
        }
    }

#if VP8_TEMPORAL_ALT_REF
    {
        int i;

        cpi->fixed_divide[0] = 0;

        /* Reciprocal table (0x80000 / i) used by the temporal filter to
         * replace divides with multiplies. */
        for (i = 1; i < 512; i++)
            cpi->fixed_divide[i] = 0x80000 / i;
    }
#endif
}
1480
/* Refresh the stored per-layer rate-control snapshots so they reflect
 * the current configuration: layer frame rate, target bandwidth,
 * buffer levels, and the average per-frame size contributed by each
 * enhancement layer.  No-op unless temporal layering is enabled.
 */
static void update_layer_contexts (VP8_COMP *cpi)
{
    VP8_CONFIG *oxcf = &cpi->oxcf;
    unsigned int layer;
    double last_layer_framerate = 0;

    if (oxcf->number_of_layers <= 1)
        return;

    assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);

    for (layer = 0;
         layer < oxcf->number_of_layers && layer < VPX_TS_MAX_LAYERS;
         ++layer)
    {
        LAYER_CONTEXT *ctx = &cpi->layer_context[layer];

        ctx->framerate = cpi->ref_framerate / oxcf->rate_decimator[layer];
        ctx->target_bandwidth = oxcf->target_bitrate[layer] * 1000;

        ctx->starting_buffer_level = rescale(
            (int)oxcf->starting_buffer_level_in_ms,
            ctx->target_bandwidth, 1000);

        /* A zero optimal/maximum level means "default to one second's
         * worth of data at the layer bandwidth / 8". */
        ctx->optimal_buffer_level =
            (oxcf->optimal_buffer_level == 0)
                ? ctx->target_bandwidth / 8
                : rescale((int)oxcf->optimal_buffer_level_in_ms,
                          ctx->target_bandwidth, 1000);

        ctx->maximum_buffer_size =
            (oxcf->maximum_buffer_size == 0)
                ? ctx->target_bandwidth / 8
                : rescale((int)oxcf->maximum_buffer_size_in_ms,
                          ctx->target_bandwidth, 1000);

        /* Average frame size for an enhancement layer is the bitrate
         * delta over the frame-rate delta relative to the layer below. */
        if (layer > 0)
        {
            ctx->avg_frame_size_for_layer =
                (int)((oxcf->target_bitrate[layer] -
                       oxcf->target_bitrate[layer - 1]) * 1000 /
                      (ctx->framerate - last_layer_framerate));
        }

        last_layer_framerate = ctx->framerate;
    }
}
1529
/* Apply a (possibly changed) encoder configuration.  Called from
 * init_config() at startup and again whenever the application updates
 * settings mid-stream.  Re-derives pass/speed settings, quantizer
 * limits, buffer levels and rate-control parameters, and re-allocates
 * frame buffers if the coded frame size changed.
 */
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
    VP8_COMMON *cm = &cpi->common;
    int last_w, last_h, prev_number_of_layers;

    if (!cpi)
        return;

    if (!oxcf)
        return;

#if CONFIG_MULTITHREAD
    /* wait for the last picture loopfilter thread done */
    if (cpi->b_lpf_running)
    {
        sem_wait(&cpi->h_event_end_lpf);
        cpi->b_lpf_running = 0;
    }
#endif

    if (cm->version != oxcf->Version)
    {
        cm->version = oxcf->Version;
        vp8_setup_version(cm);
    }

    /* Remember the previous geometry and layering so changes can be
     * detected after the new config is copied in. */
    last_w = cpi->oxcf.Width;
    last_h = cpi->oxcf.Height;
    prev_number_of_layers = cpi->oxcf.number_of_layers;

    cpi->oxcf = *oxcf;

    /* Map the requested mode onto pass number and compressor speed,
     * clamping cpu_used to the range each mode supports. */
    switch (cpi->oxcf.Mode)
    {

    case MODE_REALTIME:
        cpi->pass = 0;
        cpi->compressor_speed = 2;

        if (cpi->oxcf.cpu_used < -16)
        {
            cpi->oxcf.cpu_used = -16;
        }

        if (cpi->oxcf.cpu_used > 16)
            cpi->oxcf.cpu_used = 16;

        break;

    case MODE_GOODQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;

    case MODE_BESTQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 0;
        break;

    case MODE_FIRSTPASS:
        cpi->pass = 1;
        cpi->compressor_speed = 1;
        break;
    case MODE_SECONDPASS:
        cpi->pass = 2;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;
    case MODE_SECONDPASS_BEST:
        cpi->pass = 2;
        cpi->compressor_speed = 0;
        break;
    }

    if (cpi->pass == 0)
        cpi->auto_worst_q = 1;

    /* Translate user-scale (0-63) quantizer limits to the internal
     * 0-127 quantizer index range. */
    cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
    cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
    cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];

    if (oxcf->fixed_q >= 0)
    {
        if (oxcf->worst_allowed_q < 0)
            cpi->oxcf.fixed_q = q_trans[0];
        else
            cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];

        if (oxcf->alt_q < 0)
            cpi->oxcf.alt_q = q_trans[0];
        else
            cpi->oxcf.alt_q = q_trans[oxcf->alt_q];

        if (oxcf->key_q < 0)
            cpi->oxcf.key_q = q_trans[0];
        else
            cpi->oxcf.key_q = q_trans[oxcf->key_q];

        if (oxcf->gold_q < 0)
            cpi->oxcf.gold_q = q_trans[0];
        else
            cpi->oxcf.gold_q = q_trans[oxcf->gold_q];

    }

    cpi->baseline_gf_interval =
        cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    /* On-the-fly bitpacking always uses the maximum partition count. */
    cpi->oxcf.token_partitions = 3;
#endif

    if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
        cm->multi_token_partition =
            (TOKEN_PARTITION) cpi->oxcf.token_partitions;

    setup_features(cpi);

    {
        int i;

        /* Default every segment to the global encode-breakout value. */
        for (i = 0; i < MAX_MB_SEGMENTS; i++)
            cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
    }

    /* At the moment the first order values may not be > MAXQ */
    if (cpi->oxcf.fixed_q > MAXQ)
        cpi->oxcf.fixed_q = MAXQ;

    /* local file playback mode == really big buffer */
    if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
    {
        cpi->oxcf.starting_buffer_level = 60000;
        cpi->oxcf.optimal_buffer_level = 60000;
        cpi->oxcf.maximum_buffer_size = 240000;
        cpi->oxcf.starting_buffer_level_in_ms = 60000;
        cpi->oxcf.optimal_buffer_level_in_ms = 60000;
        cpi->oxcf.maximum_buffer_size_in_ms = 240000;
    }

    /* Convert target bandwidth from Kbit/s to Bit/s */
    cpi->oxcf.target_bandwidth *= 1000;

    /* Buffer levels arrive in milliseconds; rescale() converts them to
     * bits at the target bandwidth. */
    cpi->oxcf.starting_buffer_level =
        rescale((int)cpi->oxcf.starting_buffer_level,
                cpi->oxcf.target_bandwidth, 1000);

    /* Set or reset optimal and maximum buffer levels. */
    if (cpi->oxcf.optimal_buffer_level == 0)
        cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.optimal_buffer_level =
            rescale((int)cpi->oxcf.optimal_buffer_level,
                    cpi->oxcf.target_bandwidth, 1000);

    if (cpi->oxcf.maximum_buffer_size == 0)
        cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.maximum_buffer_size =
            rescale((int)cpi->oxcf.maximum_buffer_size,
                    cpi->oxcf.target_bandwidth, 1000);
    // Under a configuration change, where maximum_buffer_size may change,
    // keep buffer level clipped to the maximum allowed buffer size.
    if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
        cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
        cpi->buffer_level = cpi->bits_off_target;
    }

    /* Set up frame rate and related parameters rate control values. */
    vp8_new_framerate(cpi, cpi->framerate);

    /* Set absolute upper and lower quality limits */
    cpi->worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->best_quality = cpi->oxcf.best_allowed_q;

    /* active values should only be modified if out of new range */
    if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
    {
        cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
    }
    /* less likely */
    else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
    {
        cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
    }
    if (cpi->active_best_quality < cpi->oxcf.best_allowed_q)
    {
        cpi->active_best_quality = cpi->oxcf.best_allowed_q;
    }
    /* less likely */
    else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
    {
        cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
    }

    cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;

    cpi->cq_target_quality = cpi->oxcf.cq_level;

    /* Only allow dropped frames in buffered mode */
    cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;

    cpi->target_bandwidth = cpi->oxcf.target_bandwidth;

    // Check if the number of temporal layers has changed, and if so reset the
    // pattern counter and set/initialize the temporal layer context for the
    // new layer configuration.
    if (cpi->oxcf.number_of_layers != prev_number_of_layers)
    {
        // If the number of temporal layers are changed we must start at the
        // base of the pattern cycle, so set the layer id to 0 and reset
        // the temporal pattern counter.
        if (cpi->temporal_layer_id > 0) {
            cpi->temporal_layer_id = 0;
        }
        cpi->temporal_pattern_counter = 0;
        reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
    }

    if (!cpi->initial_width)
    {
        cpi->initial_width = cpi->oxcf.Width;
        cpi->initial_height = cpi->oxcf.Height;
    }

    cm->Width = cpi->oxcf.Width;
    cm->Height = cpi->oxcf.Height;
    assert(cm->Width <= cpi->initial_width);
    assert(cm->Height <= cpi->initial_height);

    /* TODO(jkoleszar): if an internal spatial resampling is active,
     * and we downsize the input image, maybe we should clear the
     * internal scale immediately rather than waiting for it to
     * correct.
     */

    /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
    if (cpi->oxcf.Sharpness > 7)
        cpi->oxcf.Sharpness = 7;

    cm->sharpness_level = cpi->oxcf.Sharpness;

    if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
    {
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);

        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);

        /* always go to the next whole number */
        cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
        cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
    }

    if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height)
        cpi->force_next_frame_intra = 1;

    /* Re-allocate buffers when the macroblock-aligned frame size no
     * longer matches the current last-frame buffer (or it was never
     * allocated). */
    if (((cm->Width + 15) & 0xfffffff0) !=
          cm->yv12_fb[cm->lst_fb_idx].y_width ||
        ((cm->Height + 15) & 0xfffffff0) !=
          cm->yv12_fb[cm->lst_fb_idx].y_height ||
        cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
    {
        dealloc_raw_frame_buffers(cpi);
        alloc_raw_frame_buffers(cpi);
        vp8_alloc_compressor_data(cpi);
    }

    if (cpi->oxcf.fixed_q >= 0)
    {
        cpi->last_q[0] = cpi->oxcf.fixed_q;
        cpi->last_q[1] = cpi->oxcf.fixed_q;
    }

    cpi->Speed = cpi->oxcf.cpu_used;

    /* force to allowlag to 0 if lag_in_frames is 0; */
    if (cpi->oxcf.lag_in_frames == 0)
    {
        cpi->oxcf.allow_lag = 0;
    }
    /* Limit on lag buffers as these are not currently dynamically allocated */
    else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
        cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;

    /* YX Temp */
    cpi->alt_ref_source = NULL;
    cpi->is_src_frame_alt_ref = 0;

#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity)
    {
        if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc)
        {
            int width = (cpi->oxcf.Width + 15) & ~15;
            int height = (cpi->oxcf.Height + 15) & ~15;
            vp8_denoiser_allocate(&cpi->denoiser, width, height,
                                  cm->mb_rows, cm->mb_cols,
                                  cpi->oxcf.noise_sensitivity);
        }
    }
#endif

#if 0
    /* Experimental RD Code */
    cpi->frame_distortion = 0;
    cpi->last_frame_distortion = 0;
#endif

}
1858
/* Fallback base-2 logarithm used by cal_mvsadcosts() below.  The
 * fallback constant is ln(2) = 0.6931..., so log(x)/M_LOG2_E yields
 * log2(x) as intended.
 * NOTE(review): if the platform's <math.h> already defines M_LOG2_E,
 * that value is log2(e) = 1.4427... (a different constant despite the
 * name), which would change the scaling of the MV SAD cost tables --
 * confirm which definition is actually in effect on target builds.
 */
#ifndef M_LOG2_E
#define M_LOG2_E 0.693147180559945309417
#endif
#define log2f(x) (log (x) / (float) M_LOG2_E)
1863
cal_mvsadcosts(int * mvsadcost[2])1864 static void cal_mvsadcosts(int *mvsadcost[2])
1865 {
1866 int i = 1;
1867
1868 mvsadcost [0] [0] = 300;
1869 mvsadcost [1] [0] = 300;
1870
1871 do
1872 {
1873 double z = 256 * (2 * (log2f(8 * i) + .6));
1874 mvsadcost [0][i] = (int) z;
1875 mvsadcost [1][i] = (int) z;
1876 mvsadcost [0][-i] = (int) z;
1877 mvsadcost [1][-i] = (int) z;
1878 }
1879 while (++i <= mvfp_max);
1880 }
1881
vp8_create_compressor(VP8_CONFIG * oxcf)1882 struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
1883 {
1884 int i;
1885
1886 VP8_COMP *cpi;
1887 VP8_COMMON *cm;
1888
1889 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1890 /* Check that the CPI instance is valid */
1891 if (!cpi)
1892 return 0;
1893
1894 cm = &cpi->common;
1895
1896 memset(cpi, 0, sizeof(VP8_COMP));
1897
1898 if (setjmp(cm->error.jmp))
1899 {
1900 cpi->common.error.setjmp = 0;
1901 vp8_remove_compressor(&cpi);
1902 return 0;
1903 }
1904
1905 cpi->common.error.setjmp = 1;
1906
1907 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1908
1909 vp8_create_common(&cpi->common);
1910
1911 init_config(cpi, oxcf);
1912
1913 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
1914 cpi->common.current_video_frame = 0;
1915 cpi->temporal_pattern_counter = 0;
1916 cpi->temporal_layer_id = -1;
1917 cpi->kf_overspend_bits = 0;
1918 cpi->kf_bitrate_adjustment = 0;
1919 cpi->frames_till_gf_update_due = 0;
1920 cpi->gf_overspend_bits = 0;
1921 cpi->non_gf_bitrate_adjustment = 0;
1922 cpi->prob_last_coded = 128;
1923 cpi->prob_gf_coded = 128;
1924 cpi->prob_intra_coded = 63;
1925
1926 /* Prime the recent reference frame usage counters.
1927 * Hereafter they will be maintained as a sort of moving average
1928 */
1929 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1930 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1931 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1932 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1933
1934 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1935 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1936
1937 cpi->twopass.gf_decay_rate = 0;
1938 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1939
1940 cpi->gold_is_last = 0 ;
1941 cpi->alt_is_last = 0 ;
1942 cpi->gold_is_alt = 0 ;
1943
1944 cpi->active_map_enabled = 0;
1945
1946 #if 0
1947 /* Experimental code for lagged and one pass */
1948 /* Initialise one_pass GF frames stats */
1949 /* Update stats used for GF selection */
1950 if (cpi->pass == 0)
1951 {
1952 cpi->one_pass_frame_index = 0;
1953
1954 for (i = 0; i < MAX_LAG_BUFFERS; i++)
1955 {
1956 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1957 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1958 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1959 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1960 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1961 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1962 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1963 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1964 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1965 }
1966 }
1967 #endif
1968
1969 cpi->mse_source_denoised = 0;
1970
1971 /* Should we use the cyclic refresh method.
1972 * Currently this is tied to error resilliant mode
1973 */
1974 cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
1975 cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 5;
1976 if (cpi->oxcf.number_of_layers == 1) {
1977 cpi->cyclic_refresh_mode_max_mbs_perframe =
1978 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1979 } else if (cpi->oxcf.number_of_layers == 2) {
1980 cpi->cyclic_refresh_mode_max_mbs_perframe =
1981 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1982 }
1983 cpi->cyclic_refresh_mode_index = 0;
1984 cpi->cyclic_refresh_q = 32;
1985
1986 if (cpi->cyclic_refresh_mode_enabled)
1987 {
1988 CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1989 }
1990 else
1991 cpi->cyclic_refresh_map = (signed char *) NULL;
1992
1993 CHECK_MEM_ERROR(cpi->consec_zero_last,
1994 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1995 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1996 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1997
1998 #ifdef VP8_ENTROPY_STATS
1999 init_context_counters();
2000 #endif
2001
2002 /*Initialize the feed-forward activity masking.*/
2003 cpi->activity_avg = 90<<12;
2004
2005 /* Give a sensible default for the first frame. */
2006 cpi->frames_since_key = 8;
2007 cpi->key_frame_frequency = cpi->oxcf.key_freq;
2008 cpi->this_key_frame_forced = 0;
2009 cpi->next_key_frame_forced = 0;
2010
2011 cpi->source_alt_ref_pending = 0;
2012 cpi->source_alt_ref_active = 0;
2013 cpi->common.refresh_alt_ref_frame = 0;
2014
2015 cpi->force_maxqp = 0;
2016
2017 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
2018 #if CONFIG_INTERNAL_STATS
2019 cpi->b_calculate_ssimg = 0;
2020
2021 cpi->count = 0;
2022 cpi->bytes = 0;
2023
2024 if (cpi->b_calculate_psnr)
2025 {
2026 cpi->total_sq_error = 0.0;
2027 cpi->total_sq_error2 = 0.0;
2028 cpi->total_y = 0.0;
2029 cpi->total_u = 0.0;
2030 cpi->total_v = 0.0;
2031 cpi->total = 0.0;
2032 cpi->totalp_y = 0.0;
2033 cpi->totalp_u = 0.0;
2034 cpi->totalp_v = 0.0;
2035 cpi->totalp = 0.0;
2036 cpi->tot_recode_hits = 0;
2037 cpi->summed_quality = 0;
2038 cpi->summed_weights = 0;
2039 }
2040
2041 if (cpi->b_calculate_ssimg)
2042 {
2043 cpi->total_ssimg_y = 0;
2044 cpi->total_ssimg_u = 0;
2045 cpi->total_ssimg_v = 0;
2046 cpi->total_ssimg_all = 0;
2047 }
2048
2049 #endif
2050
2051 cpi->first_time_stamp_ever = 0x7FFFFFFF;
2052
2053 cpi->frames_till_gf_update_due = 0;
2054 cpi->key_frame_count = 1;
2055
2056 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
2057 cpi->ni_tot_qi = 0;
2058 cpi->ni_frames = 0;
2059 cpi->total_byte_count = 0;
2060
2061 cpi->drop_frame = 0;
2062
2063 cpi->rate_correction_factor = 1.0;
2064 cpi->key_frame_rate_correction_factor = 1.0;
2065 cpi->gf_rate_correction_factor = 1.0;
2066 cpi->twopass.est_max_qcorrection_factor = 1.0;
2067
2068 for (i = 0; i < KEY_FRAME_CONTEXT; i++)
2069 {
2070 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
2071 }
2072
2073 #ifdef OUTPUT_YUV_SRC
2074 yuv_file = fopen("bd.yuv", "ab");
2075 #endif
2076 #ifdef OUTPUT_YUV_DENOISED
2077 yuv_denoised_file = fopen("denoised.yuv", "ab");
2078 #endif
2079
2080 #if 0
2081 framepsnr = fopen("framepsnr.stt", "a");
2082 kf_list = fopen("kf_list.stt", "w");
2083 #endif
2084
2085 cpi->output_pkt_list = oxcf->output_pkt_list;
2086
2087 #if !(CONFIG_REALTIME_ONLY)
2088
2089 if (cpi->pass == 1)
2090 {
2091 vp8_init_first_pass(cpi);
2092 }
2093 else if (cpi->pass == 2)
2094 {
2095 size_t packet_sz = sizeof(FIRSTPASS_STATS);
2096 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
2097
2098 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
2099 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
2100 cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
2101 + (packets - 1) * packet_sz);
2102 vp8_init_second_pass(cpi);
2103 }
2104
2105 #endif
2106
2107 if (cpi->compressor_speed == 2)
2108 {
2109 cpi->avg_encode_time = 0;
2110 cpi->avg_pick_mode_time = 0;
2111 }
2112
2113 vp8_set_speed_features(cpi);
2114
2115 /* Set starting values of RD threshold multipliers (128 = *1) */
2116 for (i = 0; i < MAX_MODES; i++)
2117 {
2118 cpi->mb.rd_thresh_mult[i] = 128;
2119 }
2120
2121 #ifdef VP8_ENTROPY_STATS
2122 init_mv_ref_counts();
2123 #endif
2124
2125 #if CONFIG_MULTITHREAD
2126 if(vp8cx_create_encoder_threads(cpi))
2127 {
2128 vp8_remove_compressor(&cpi);
2129 return 0;
2130 }
2131 #endif
2132
2133 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2134 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2135 cpi->fn_ptr[BLOCK_16X16].svf = vp8_sub_pixel_variance16x16;
2136 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vp8_variance_halfpixvar16x16_h;
2137 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vp8_variance_halfpixvar16x16_v;
2138 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv;
2139 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
2140 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
2141 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2142
2143 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2144 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2145 cpi->fn_ptr[BLOCK_16X8].svf = vp8_sub_pixel_variance16x8;
2146 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
2147 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
2148 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
2149 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
2150 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
2151 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2152
2153 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2154 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2155 cpi->fn_ptr[BLOCK_8X16].svf = vp8_sub_pixel_variance8x16;
2156 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
2157 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
2158 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
2159 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
2160 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
2161 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2162
2163 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2164 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2165 cpi->fn_ptr[BLOCK_8X8].svf = vp8_sub_pixel_variance8x8;
2166 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
2167 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
2168 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
2169 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2170 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2171 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2172
2173 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2174 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2175 cpi->fn_ptr[BLOCK_4X4].svf = vp8_sub_pixel_variance4x4;
2176 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
2177 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
2178 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
2179 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2180 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2181 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2182
2183 #if ARCH_X86 || ARCH_X86_64
2184 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2185 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2186 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2187 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2188 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2189 #endif
2190
2191 cpi->full_search_sad = vp8_full_search_sad;
2192 cpi->diamond_search_sad = vp8_diamond_search_sad;
2193 cpi->refining_search_sad = vp8_refining_search_sad;
2194
2195 /* make sure frame 1 is okay */
2196 cpi->mb.error_bins[0] = cpi->common.MBs;
2197
2198 /* vp8cx_init_quantizer() is first called here. Add check in
2199 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2200 * called later when needed. This will avoid unnecessary calls of
2201 * vp8cx_init_quantizer() for every frame.
2202 */
2203 vp8cx_init_quantizer(cpi);
2204
2205 vp8_loop_filter_init(cm);
2206
2207 cpi->common.error.setjmp = 0;
2208
2209 #if CONFIG_MULTI_RES_ENCODING
2210
2211 /* Calculate # of MBs in a row in lower-resolution level image. */
2212 if (cpi->oxcf.mr_encoder_id > 0)
2213 vp8_cal_low_res_mb_cols(cpi);
2214
2215 #endif
2216
2217 /* setup RD costs to MACROBLOCK struct */
2218
2219 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max+1];
2220 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max+1];
2221 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max+1];
2222 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max+1];
2223
2224 cal_mvsadcosts(cpi->mb.mvsadcost);
2225
2226 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2227 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2228 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2229 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2230 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2231
2232 /* setup block ptrs & offsets */
2233 vp8_setup_block_ptrs(&cpi->mb);
2234 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2235
2236 return cpi;
2237 }
2238
2239
/* Tear down a compressor instance: flush two-pass state, emit any
 * compile-time statistics, release all per-encoder allocations, and
 * finally free the VP8_COMP itself.  *ptr is cleared so the caller's
 * handle cannot be reused.
 *
 * Fixes: the "opsnr.stt" stream was previously written and fclose()d
 * without checking the fopen() result (undefined behaviour on failure),
 * and cpi was re-tested for NULL after the early return already proved
 * it non-NULL.
 */
void vp8_remove_compressor(VP8_COMP **ptr)
{
    VP8_COMP *cpi = *ptr;

    if (!cpi)
        return;

    /* Only emit end-of-stream statistics if at least one frame was coded. */
    if (cpi->common.current_video_frame > 0)
    {
#if !(CONFIG_REALTIME_ONLY)

        if (cpi->pass == 2)
        {
            vp8_end_second_pass(cpi);
        }

#endif

#ifdef VP8_ENTROPY_STATS
        print_context_counters();
        print_tree_update_probs();
        print_mode_context();
#endif

#if CONFIG_INTERNAL_STATS

        if (cpi->pass != 1)
        {
            FILE *f = fopen("opsnr.stt", "a");

            /* Guard against fopen() failure: fprintf()/fclose() on a
             * NULL stream is undefined behaviour.
             */
            if (f != NULL)
            {
                double time_encoded = (cpi->last_end_time_stamp_seen
                                       - cpi->first_time_stamp_ever) / 10000000.000;
                double total_encode_time = (cpi->time_receive_data +
                                            cpi->time_compress_data) / 1000.000;
                double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;

                if (cpi->b_calculate_psnr)
                {
                    if (cpi->oxcf.number_of_layers > 1)
                    {
                        int i;

                        fprintf(f, "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
                                   "GLPsnrP\tVPXSSIM\t\n");
                        for (i = 0; i < (int)cpi->oxcf.number_of_layers; i++)
                        {
                            double dr = (double)cpi->bytes_in_layer[i] *
                                        8.0 / 1000.0 / time_encoded;
                            /* 3/2 samples per pixel for 4:2:0 (Y + U/4 + V/4). */
                            double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
                                             cpi->common.Width * cpi->common.Height;
                            double total_psnr =
                                vpx_sse_to_psnr(samples, 255.0,
                                                cpi->total_error2[i]);
                            double total_psnr2 =
                                vpx_sse_to_psnr(samples, 255.0,
                                                cpi->total_error2_p[i]);
                            double total_ssim = 100 * pow(cpi->sum_ssim[i] /
                                                          cpi->sum_weights[i], 8.0);

                            fprintf(f, "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
                                       "%7.3f\t%7.3f\n",
                                    i, dr,
                                    cpi->sum_psnr[i] / cpi->frames_in_layer[i],
                                    total_psnr,
                                    cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
                                    total_psnr2, total_ssim);
                        }
                    }
                    else
                    {
                        /* 3/2 samples per pixel for 4:2:0 (Y + U/4 + V/4). */
                        double samples = 3.0 / 2 * cpi->count *
                                         cpi->common.Width * cpi->common.Height;
                        double total_psnr = vpx_sse_to_psnr(samples, 255.0,
                                                            cpi->total_sq_error);
                        double total_psnr2 = vpx_sse_to_psnr(samples, 255.0,
                                                             cpi->total_sq_error2);
                        double total_ssim = 100 * pow(cpi->summed_quality /
                                                      cpi->summed_weights, 8.0);

                        fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
                                   "GLPsnrP\tVPXSSIM\t  Time(us)\n");
                        fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
                                   "%7.3f\t%8.0f\n",
                                dr, cpi->total / cpi->count, total_psnr,
                                cpi->totalp / cpi->count, total_psnr2,
                                total_ssim, total_encode_time);
                    }
                }

                if (cpi->b_calculate_ssimg)
                {
                    if (cpi->oxcf.number_of_layers > 1)
                    {
                        int i;

                        fprintf(f, "Layer\tBitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
                                   "Time(us)\n");
                        for (i = 0; i < (int)cpi->oxcf.number_of_layers; i++)
                        {
                            double dr = (double)cpi->bytes_in_layer[i] *
                                        8.0 / 1000.0 / time_encoded;
                            fprintf(f, "%5d\t%7.3f\t%6.4f\t"
                                       "%6.4f\t%6.4f\t%6.4f\t%8.0f\n",
                                    i, dr,
                                    cpi->total_ssimg_y_in_layer[i] /
                                        cpi->frames_in_layer[i],
                                    cpi->total_ssimg_u_in_layer[i] /
                                        cpi->frames_in_layer[i],
                                    cpi->total_ssimg_v_in_layer[i] /
                                        cpi->frames_in_layer[i],
                                    cpi->total_ssimg_all_in_layer[i] /
                                        cpi->frames_in_layer[i],
                                    total_encode_time);
                        }
                    }
                    else
                    {
                        fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
                                   "Time(us)\n");
                        fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
                                cpi->total_ssimg_y / cpi->count,
                                cpi->total_ssimg_u / cpi->count,
                                cpi->total_ssimg_v / cpi->count,
                                cpi->total_ssimg_all / cpi->count, total_encode_time);
                    }
                }

                fclose(f);
#if 0
                f = fopen("qskip.stt", "a");
                fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
                fclose(f);
#endif
            }
        }

#endif


#ifdef SPEEDSTATS

        if (cpi->compressor_speed == 2)
        {
            int i;
            FILE *f = fopen("cxspeed.stt", "a");
            cnt_pm /= cpi->common.MBs;

            for (i = 0; i < 16; i++)
                fprintf(f, "%5d", frames_at_speed[i]);

            fprintf(f, "\n");
            fclose(f);
        }

#endif


#ifdef MODE_STATS
        {
            extern int count_mb_seg[4];
            FILE *f = fopen("modes.stt", "a");
            double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ;
            fprintf(f, "intra_mode in Intra Frames:\n");
            fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
            fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
            fprintf(f, "B: ");
            {
                int i;

                for (i = 0; i < 10; i++)
                    fprintf(f, "%8d, ", b_modes[i]);

                fprintf(f, "\n");

            }

            fprintf(f, "Modes in Inter Frames:\n");
            fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
                    inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4],
                    inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]);
            fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
            fprintf(f, "B: ");
            {
                int i;

                for (i = 0; i < 15; i++)
                    fprintf(f, "%8d, ", inter_b_modes[i]);

                fprintf(f, "\n");

            }
            fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
            fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);



            fclose(f);
        }
#endif

#ifdef VP8_ENTROPY_STATS
        {
            int i, j, k;
            FILE *fmode = fopen("modecontext.c", "w");

            fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
            fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
            fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");

            for (i = 0; i < 10; i++)
            {

                fprintf(fmode, "    { /* Above Mode :  %d */\n", i);

                for (j = 0; j < 10; j++)
                {

                    fprintf(fmode, "        {");

                    for (k = 0; k < 10; k++)
                    {
                        if (!intra_mode_stats[i][j][k])
                            fprintf(fmode, " %5d, ", 1);
                        else
                            fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
                    }

                    fprintf(fmode, "}, /* left_mode %d */\n", j);

                }

                fprintf(fmode, "    },\n");

            }

            fprintf(fmode, "};\n");
            fclose(fmode);
        }
#endif


#if defined(SECTIONBITS_OUTPUT)

        if (0)
        {
            int i;
            FILE *f = fopen("tokenbits.stt", "a");

            for (i = 0; i < 28; i++)
                fprintf(f, "%8d", (int)(Sectionbits[i] / 256));

            fprintf(f, "\n");
            fclose(f);
        }

#endif

#if 0
        {
            printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
            printf("\n_frames recive_data encod_mb_row compress_frame  Total\n");
            printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
        }
#endif

    }

#if CONFIG_MULTITHREAD
    vp8cx_remove_encoder_threads(cpi);
#endif

#if CONFIG_TEMPORAL_DENOISING
    vp8_denoiser_free(&cpi->denoiser);
#endif
    /* Free encoder-owned buffers, then the shared common state. */
    dealloc_compressor_data(cpi);
    vpx_free(cpi->mb.ss);
    vpx_free(cpi->tok);
    vpx_free(cpi->cyclic_refresh_map);
    vpx_free(cpi->consec_zero_last);
    vpx_free(cpi->consec_zero_last_mvbias);

    vp8_remove_common(&cpi->common);
    vpx_free(cpi);
    *ptr = 0;

#ifdef OUTPUT_YUV_SRC
    fclose(yuv_file);
#endif
#ifdef OUTPUT_YUV_DENOISED
    fclose(yuv_denoised_file);
#endif

#if 0

    if (keyfile)
        fclose(keyfile);

    if (framepsnr)
        fclose(framepsnr);

    if (kf_list)
        fclose(kf_list);

#endif

}
2545
2546
/* Sum of squared differences between two planes of identical dimensions.
 * Complete 16x16 tiles go through the optimized MSE kernel (which returns
 * the tile SSE); right and bottom borders narrower/shorter than 16 pixels
 * are accumulated pixel by pixel.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows)
{
    uint64_t sum = 0;
    unsigned int r, c;

    /* Bands of 16 full rows. */
    for (r = 0; r + 16 <= rows; r += 16)
    {
        for (c = 0; c + 16 <= cols; c += 16)
        {
            unsigned int tile_sse;

            vpx_mse16x16(orig + c, orig_stride,
                         recon + c, recon_stride,
                         &tile_sse);
            sum += tile_sse;
        }

        /* Right border narrower than a full tile. */
        if (c < cols)
        {
            unsigned char *o_row = orig;
            unsigned char *r_row = recon;
            unsigned int y, x;

            for (y = 0; y < 16; y++)
            {
                for (x = c; x < cols; x++)
                {
                    const int d = o_row[x] - r_row[x];
                    sum += d * d;
                }

                o_row += orig_stride;
                r_row += recon_stride;
            }
        }

        orig += orig_stride * 16;
        recon += recon_stride * 16;
    }

    /* Bottom border shorter than 16 rows. */
    for (; r < rows; r++)
    {
        for (c = 0; c < cols; c++)
        {
            const int d = orig[c] - recon[c];
            sum += d * d;
        }

        orig += orig_stride;
        recon += recon_stride;
    }

    vp8_clear_system_state();
    return sum;
}
2607
2608
/* Build a VPX_CODEC_PSNR_PKT for the current frame and append it to the
 * encoder's output packet list.  Index 0 aggregates all planes; indices
 * 1..3 are Y, U, V respectively.
 */
static void generate_psnr_packet(VP8_COMP *cpi)
{
    YV12_BUFFER_CONFIG *src = cpi->Source;
    YV12_BUFFER_CONFIG *dst = cpi->common.frame_to_show;
    struct vpx_codec_cx_pkt pkt;
    uint64_t plane_sse;
    int plane;
    unsigned int w = cpi->common.Width;
    unsigned int h = cpi->common.Height;

    pkt.kind = VPX_CODEC_PSNR_PKT;

    /* Luma plane. */
    plane_sse = calc_plane_error(src->y_buffer, src->y_stride,
                                 dst->y_buffer, dst->y_stride,
                                 w, h);
    pkt.data.psnr.sse[0] = plane_sse;
    pkt.data.psnr.sse[1] = plane_sse;
    pkt.data.psnr.samples[0] = w * h;
    pkt.data.psnr.samples[1] = w * h;

    /* Chroma planes are subsampled by two in each direction (round up). */
    w = (w + 1) / 2;
    h = (h + 1) / 2;

    plane_sse = calc_plane_error(src->u_buffer, src->uv_stride,
                                 dst->u_buffer, dst->uv_stride,
                                 w, h);
    pkt.data.psnr.sse[0] += plane_sse;
    pkt.data.psnr.sse[2] = plane_sse;
    pkt.data.psnr.samples[0] += w * h;
    pkt.data.psnr.samples[2] = w * h;

    plane_sse = calc_plane_error(src->v_buffer, src->uv_stride,
                                 dst->v_buffer, dst->uv_stride,
                                 w, h);
    pkt.data.psnr.sse[0] += plane_sse;
    pkt.data.psnr.sse[3] = plane_sse;
    pkt.data.psnr.samples[0] += w * h;
    pkt.data.psnr.samples[3] = w * h;

    for (plane = 0; plane < 4; plane++)
        pkt.data.psnr.psnr[plane] =
            vpx_sse_to_psnr(pkt.data.psnr.samples[plane], 255.0,
                            (double)(pkt.data.psnr.sse[plane]));

    vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
2653
2654
/* Record which reference frames (last/golden/altref bit flags) the next
 * encoded frame may predict from.  Returns 0 on success, -1 if flags
 * outside the three valid bits are set.
 */
int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags)
{
    if (ref_frame_flags > 7)
        return -1;

    cpi->ref_frame_flags = ref_frame_flags;
    return 0;
}
/* Select which reference buffers (last/golden/altref) the next encoded
 * frame will refresh.  Returns 0 on success, -1 if flags outside the
 * three valid bits are set.
 */
int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags)
{
    VP8_COMMON *cm = &cpi->common;

    if (ref_frame_flags > 7)
        return -1;

    cm->refresh_last_frame    = (ref_frame_flags & VP8_LAST_FRAME) ? 1 : 0;
    cm->refresh_golden_frame  = (ref_frame_flags & VP8_GOLD_FRAME) ? 1 : 0;
    cm->refresh_alt_ref_frame = (ref_frame_flags & VP8_ALTR_FRAME) ? 1 : 0;

    return 0;
}
2683
/* Copy the requested reference frame buffer out to sd.
 * Returns 0 on success, -1 for an unrecognized reference type.
 */
int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
{
    VP8_COMMON *cm = &cpi->common;
    int fb_idx;

    switch (ref_frame_flag)
    {
    case VP8_LAST_FRAME:
        fb_idx = cm->lst_fb_idx;
        break;
    case VP8_GOLD_FRAME:
        fb_idx = cm->gld_fb_idx;
        break;
    case VP8_ALTR_FRAME:
        fb_idx = cm->alt_fb_idx;
        break;
    default:
        return -1;
    }

    vp8_yv12_copy_frame(&cm->yv12_fb[fb_idx], sd);

    return 0;
}
/* Overwrite the requested reference frame buffer with the contents of sd.
 * Returns 0 on success, -1 for an unrecognized reference type.
 */
int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
{
    VP8_COMMON *cm = &cpi->common;
    int fb_idx;

    switch (ref_frame_flag)
    {
    case VP8_LAST_FRAME:
        fb_idx = cm->lst_fb_idx;
        break;
    case VP8_GOLD_FRAME:
        fb_idx = cm->gld_fb_idx;
        break;
    case VP8_ALTR_FRAME:
        fb_idx = cm->alt_fb_idx;
        break;
    default:
        return -1;
    }

    vp8_yv12_copy_frame(sd, &cm->yv12_fb[fb_idx]);

    return 0;
}
/* Enable/disable persistence of entropy probability updates across
 * frames.  Always returns 0.
 */
int vp8_update_entropy(VP8_COMP *cpi, int update)
{
    cpi->common.refresh_entropy_probs = update;
    return 0;
}
2728
2729
2730 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
/* Write one row-span of a plane: 'height' rows of 'width' bytes each,
 * advancing by 'stride' between rows.  fwrite() results are intentionally
 * ignored: this is a debug-only dump path.
 */
static void write_plane(FILE *file, const unsigned char *src,
                        int width, int height, int stride)
{
    int row;

    /* A guarded for-loop (not do/while) so a zero-height plane writes
     * nothing instead of executing once and then underflowing the
     * counter past zero.
     */
    for (row = 0; row < height; row++)
    {
        (void)fwrite(src, width, 1, file);
        src += stride;
    }
}

/* Dump one raw I420 frame (Y plane, then U, then V) to yuv_file for
 * offline inspection.
 */
void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s)
{
    write_plane(yuv_file, s->y_buffer, s->y_width, s->y_height, s->y_stride);
    write_plane(yuv_file, s->u_buffer, s->uv_width, s->uv_height, s->uv_stride);
    write_plane(yuv_file, s->v_buffer, s->uv_width, s->uv_height, s->uv_stride);
}
2763 #endif
2764
/* Select the frame to encode: point cpi->Source directly at the raw
 * input, or — when spatial resampling is requested — scale the input
 * into cpi->scaled_source first and use that.
 */
static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    /* are we resizing the image */
    if (cm->horiz_scale != 0 || cm->vert_scale != 0)
    {
#if CONFIG_SPATIAL_RESAMPLING
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
        int tmp_height;

        /* Scratch-buffer height passed to the scaler.  NOTE(review): the
         * magic values 9/11 presumably match the scaler's internal band
         * sizes for the respective vertical ratios — confirm against
         * vpx_scale_frame() before changing.
         */
        if (cm->vert_scale == 3)
            tmp_height = 9;
        else
            tmp_height = 11;

        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);

        vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
                        tmp_height, hs, hr, vs, vr, 0);

        /* Replicate border pixels so motion search may read past edges. */
        vp8_yv12_extend_frame_borders(&cpi->scaled_source);
        cpi->Source = &cpi->scaled_source;
#endif
        /* NOTE(review): when CONFIG_SPATIAL_RESAMPLING is 0 but a scale
         * is requested, cpi->Source is left untouched here — verify the
         * caller guarantees it is already valid in that configuration.
         */
    }
    else
        cpi->Source = sd;
}
2795
2796
/* One-pass CBR spatial resampling decision, evaluated per key frame:
 * step the scale factor up or down one notch depending on buffer
 * fullness, and if the resulting frame size differs, reallocate and
 * rescale.  Returns 1 when the frame size changed, 0 otherwise.
 */
static int resize_key_frame(VP8_COMP *cpi)
{
#if CONFIG_SPATIAL_RESAMPLING
    VP8_COMMON *cm = &cpi->common;

    /* Do we need to apply resampling for one pass cbr.
     * In one pass this is more limited than in two pass cbr.
     * The test and any change is only made once per key frame sequence.
     */
    if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
    {
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
        int new_width, new_height;

        /* If we are below the resample DOWN watermark then scale down a
         * notch.
         */
        if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))
        {
            /* Move one step towards the maximum reduction (ONETWO). */
            cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
            cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
        }
        /* Should we now start scaling back up */
        else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))
        {
            /* Move one step back towards full resolution (NORMAL). */
            cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
            cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
        }

        /* Get the new height and width */
        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);
        /* ((d - 1) + n) / d rounds the scaled dimension up. */
        new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
        new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;

        /* If the image size has changed we need to reallocate the buffers
         * and resample the source image
         */
        if ((cm->Width != new_width) || (cm->Height != new_height))
        {
            cm->Width = new_width;
            cm->Height = new_height;
            vp8_alloc_compressor_data(cpi);
            scale_and_extend_source(cpi->un_scaled_source, cpi);
            return 1;
        }
    }

#endif
    return 0;
}
2849
2850
/* Book-keeping after an alt-ref frame has been coded: schedule the next
 * GF/ARF update, charge the ARF overspend to future inter frames, and
 * reset golden/alt-ref tracking state.
 */
static void update_alt_ref_frame_stats(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    /* Without automatic GF selection, fall back to the fixed interval. */
    if (!cpi->auto_gold)
        cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;

    if ((cpi->pass != 2) && cpi->frames_till_gf_update_due)
    {
        cpi->current_gf_interval = cpi->frames_till_gf_update_due;

        /* Spread the bits spent on this ARF across the following inter
         * frames.  GF updates coincident with a key frame are accounted
         * for elsewhere.
         */
        cpi->gf_overspend_bits += cpi->projected_frame_size;
        cpi->non_gf_bitrate_adjustment =
            cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
    }

    /* Mark every macroblock as referencing the fresh GF. */
    memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

    /* This frame performed the refresh; restart the since-golden count. */
    cpi->frames_since_golden = 0;

    /* The pending alt-ref update has happened: clear the pending flag
     * and mark the alt-ref buffer as active.
     */
    cpi->source_alt_ref_pending = 0;
    cpi->source_alt_ref_active = 1;
}
/* Per-frame golden-frame book-keeping: on a GF refresh, schedule the next
 * update and charge the GF overspend to future inter frames; otherwise
 * advance the countdown timers and accumulate reference usage counts.
 */
static void update_golden_frame_stats(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    /* Update the Golden frame usage counts. */
    if (cm->refresh_golden_frame)
    {
        /* Select an interval before next GF */
        if (!cpi->auto_gold)
            cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;

        if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
        {
            cpi->current_gf_interval = cpi->frames_till_gf_update_due;

            /* Set the bits per frame that we should try and recover in
             * subsequent inter frames to account for the extra GF spend...
             * note that this does not apply for GF updates that occur
             * coincident with a key frame as the extra cost of key frames
             * is dealt with elsewhere.
             */
            if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
            {
                /* Calculate GF bits to be recovered:
                 * projected size minus the average frame bits available
                 * for inter frames for the clip as a whole.
                 */
                cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
            }

            cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;

        }

        /* Update data structure that monitors level of reference to last GF */
        memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
        cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

        /* this frame refreshes means next frames don't unless specified by
         * user
         */
        cm->refresh_golden_frame = 0;
        cpi->frames_since_golden = 0;

        /* Restart the recent reference usage counts. */
        cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
        cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
        cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
        cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;

        /* ******** Fixed Q test code only ************ */
        /* If we are going to use the ALT reference for the next group of
         * frames set a flag to say so.
         */
        if (cpi->oxcf.fixed_q >= 0 &&
            cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
        {
            cpi->source_alt_ref_pending = 1;
            cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
        }

        if (!cpi->source_alt_ref_pending)
            cpi->source_alt_ref_active = 0;

        /* Decrement count down till next gf */
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;

    }
    else if (!cpi->common.refresh_alt_ref_frame)
    {
        /* Decrement count down till next gf */
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;

        if (cpi->frames_till_alt_ref_frame)
            cpi->frames_till_alt_ref_frame --;

        cpi->frames_since_golden ++;

        /* Accumulate per-frame reference usage; the frame immediately
         * following a golden refresh is skipped.
         */
        if (cpi->frames_since_golden > 1)
        {
            cpi->recent_ref_frame_usage[INTRA_FRAME] +=
                cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
            cpi->recent_ref_frame_usage[LAST_FRAME] +=
                cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
            cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
                cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
            cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
                cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
        }
    }
}
2980
/* This function updates the reference frame probability estimates that
 * will be used during mode selection
 */
static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    /* Per-macroblock counts of how often each reference type was chosen
     * in the previous frame.
     */
    const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
    const int rf_intra = rfct[INTRA_FRAME];
    const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

    if (cm->frame_type == KEY_FRAME)
    {
        /* Key frames are intra-only; inter probabilities get neutral
         * defaults.
         */
        cpi->prob_intra_coded = 255;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else if (!(rf_intra + rf_inter))
    {
        /* No usage statistics collected yet: fall back to defaults. */
        cpi->prob_intra_coded = 63;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }

    /* update reference frame costs since we can do better than what we got
     * last frame.
     */
    if (cpi->oxcf.number_of_layers == 1)
    {
        if (cpi->common.refresh_alt_ref_frame)
        {
            /* Bias towards intra on ARF updates, capped at 255. */
            cpi->prob_intra_coded += 40;
            if (cpi->prob_intra_coded > 255)
                cpi->prob_intra_coded = 255;
            cpi->prob_last_coded = 200;
            cpi->prob_gf_coded = 1;
        }
        else if (cpi->frames_since_golden == 0)
        {
            cpi->prob_last_coded = 214;
        }
        else if (cpi->frames_since_golden == 1)
        {
            cpi->prob_last_coded = 192;
            cpi->prob_gf_coded = 220;
        }
        else if (cpi->source_alt_ref_active)
        {
            /* Decay golden probability while an alt-ref is in use,
             * with a floor of 10.
             */
            cpi->prob_gf_coded -= 20;

            if (cpi->prob_gf_coded < 10)
                cpi->prob_gf_coded = 10;
        }
        if (!cpi->source_alt_ref_active)
            cpi->prob_gf_coded = 255;
    }
}
3038
3039
/* One-pass key frame decision heuristic: 1 = key, 0 = inter */
static int decide_key_frame(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    int code_key_frame = 0;

    cpi->kf_boost = 0;

    /* At the very highest speed settings never force a key frame here. */
    if (cpi->Speed > 11)
        return 0;

    /* Clear down mmx registers */
    vp8_clear_system_state();

    /* Fast path for high-speed real-time encoding (no RD): trigger a key
     * frame on a large relative jump in intra or prediction error.
     */
    if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
    {
        /* Relative change in intra error versus the previous frame. */
        double change = 1.0 * abs((int)(cpi->mb.intra_error -
                                        cpi->last_intra_error)) / (1 + cpi->last_intra_error);
        /* Relative change in inter prediction error. */
        double change2 = 1.0 * abs((int)(cpi->mb.prediction_error -
                                         cpi->last_prediction_error)) / (1 + cpi->last_prediction_error);
        /* Error floor: 256 per macroblock. */
        double minerror = cm->MBs * 256;

        cpi->last_intra_error = cpi->mb.intra_error;
        cpi->last_prediction_error = cpi->mb.prediction_error;

        /* Key frame when intra cost is low relative to inter cost, the
         * inter error is non-trivial, and either error jumped by > 25%.
         */
        if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15
            && cpi->mb.prediction_error > minerror
            && (change > .25 || change2 > .25))
        {
            /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/
            return 1;
        }

        return 0;

    }

    /* If the following are true we might as well code a key frame */
    if (((cpi->this_frame_percent_intra == 100) &&
         (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
        ((cpi->this_frame_percent_intra > 95) &&
         (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5))))
    {
        code_key_frame = 1;
    }
    /* in addition if the following are true and this is not a golden frame
     * then code a key frame Note that on golden frames there often seems
     * to be a pop in intra usage anyway hence this restriction is
     * designed to prevent spurious key frames. The Intra pop needs to be
     * investigated.
     */
    else if (((cpi->this_frame_percent_intra > 60) &&
              (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) ||
             ((cpi->this_frame_percent_intra > 75) &&
              (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) ||
             ((cpi->this_frame_percent_intra > 90) &&
              (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10))))
    {
        if (!cm->refresh_golden_frame)
            code_key_frame = 1;
    }

    return code_key_frame;

}
3106
3107 #if !(CONFIG_REALTIME_ONLY)
/* First pass of a two-pass encode: gather first-pass statistics for the
 * current frame at a fixed quantizer. No bitstream is produced, so the
 * output buffer arguments are unused.
 */
static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
{
    (void) frame_flags;
    (void) dest;
    (void) size;

    /* Fixed Q for stats gathering. */
    vp8_set_quantizer(cpi, 26);

    vp8_first_pass(cpi);
}
3117 #endif
3118
3119 #if 0
3120 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
3121 {
3122
3123 /* write the frame */
3124 FILE *yframe;
3125 int i;
3126 char filename[255];
3127
3128 sprintf(filename, "cx\\y%04d.raw", this_frame);
3129 yframe = fopen(filename, "wb");
3130
3131 for (i = 0; i < frame->y_height; i++)
3132 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
3133
3134 fclose(yframe);
3135 sprintf(filename, "cx\\u%04d.raw", this_frame);
3136 yframe = fopen(filename, "wb");
3137
3138 for (i = 0; i < frame->uv_height; i++)
3139 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3140
3141 fclose(yframe);
3142 sprintf(filename, "cx\\v%04d.raw", this_frame);
3143 yframe = fopen(filename, "wb");
3144
3145 for (i = 0; i < frame->uv_height; i++)
3146 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3147
3148 fclose(yframe);
3149 }
3150 #endif
3151 /* return of 0 means drop frame */
3152
/* Function to test for conditions that indicate we should loop
 * back and recode a frame.
 */
/* Decide whether the frame just encoded should be re-encoded.
 * Returns 1 to force a recode, 0 otherwise.
 * Side effect: in the constrained-quality severe-undershoot case,
 * active_best_quality is clamped down to the user cq level.
 */
static int recode_loop_test( VP8_COMP *cpi,
                              int high_limit, int low_limit,
                              int q, int maxq, int minq )
{
    VP8_COMMON *cm = &cpi->common;
    int recode_allowed;

    /* Recoding is allowed when recode mode 1 is selected, or when mode 2
     * is selected and the frame is a key frame, golden frame or
     * alt_ref_frame.
     */
    recode_allowed = (cpi->sf.recode_loop == 1) ||
                     ((cpi->sf.recode_loop == 2) &&
                      ((cm->frame_type == KEY_FRAME) ||
                       cm->refresh_golden_frame ||
                       cm->refresh_alt_ref_frame));

    if (!recode_allowed)
        return 0;

    /* General over and under shoot tests. */
    if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
        ((cpi->projected_frame_size < low_limit) && (q > minq)))
    {
        return 1;
    }

    /* Special constrained quality tests. */
    if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
    {
        /* Undershoot and below auto cq level. */
        if ((q > cpi->cq_target_quality) &&
            (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3)))
        {
            return 1;
        }

        /* Severe undershoot and between auto and user cq level. */
        if ((q > cpi->oxcf.cq_level) &&
            (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
            (cpi->active_best_quality > cpi->oxcf.cq_level))
        {
            cpi->active_best_quality = cpi->oxcf.cq_level;
            return 1;
        }
    }

    return 0;
}
3202
/* Update the reference-frame bookkeeping after a frame has been encoded:
 * move the last / golden / alt-ref buffer indices and flags according to
 * the refresh and buffer-copy signals, record which source frame each
 * reference now corresponds to, and (with temporal denoising) keep the
 * denoiser's running-average buffers in sync with the references.
 *
 * Fix vs original: the copy_buffer_to_gf branch asserted on
 * cm->copy_buffer_to_arf (copy-paste from the ARF branch above); it now
 * asserts on cm->copy_buffer_to_gf, the value it actually switches on.
 */
static void update_reference_frames(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;
    YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;

    /* At this point the new frame has been encoded.
     * If any buffer copy / swapping is signaled it should be done here.
     */

    if (cm->frame_type == KEY_FRAME)
    {
        /* A key frame refreshes golden and alt-ref as well as last. */
        yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME ;

        yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
        yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;

        cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;

        cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
        cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
    }
    else    /* For non key frames */
    {
        if (cm->refresh_alt_ref_frame)
        {
            assert(!cm->copy_buffer_to_arf);

            cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
            cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
            cm->alt_fb_idx = cm->new_fb_idx;

            cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
        }
        else if (cm->copy_buffer_to_arf)
        {
            assert(!(cm->copy_buffer_to_arf & ~0x3));

            if (cm->copy_buffer_to_arf == 1)
            {
                /* Point alt-ref at the last-frame buffer. */
                if(cm->alt_fb_idx != cm->lst_fb_idx)
                {
                    yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
                    yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
                    cm->alt_fb_idx = cm->lst_fb_idx;

                    cpi->current_ref_frames[ALTREF_FRAME] =
                        cpi->current_ref_frames[LAST_FRAME];
                }
            }
            else /* if (cm->copy_buffer_to_arf == 2) */
            {
                /* Point alt-ref at the golden-frame buffer. */
                if(cm->alt_fb_idx != cm->gld_fb_idx)
                {
                    yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
                    yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
                    cm->alt_fb_idx = cm->gld_fb_idx;

                    cpi->current_ref_frames[ALTREF_FRAME] =
                        cpi->current_ref_frames[GOLDEN_FRAME];
                }
            }
        }

        if (cm->refresh_golden_frame)
        {
            assert(!cm->copy_buffer_to_gf);

            cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
            cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
            cm->gld_fb_idx = cm->new_fb_idx;

            cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
        }
        else if (cm->copy_buffer_to_gf)
        {
            /* Fixed: was asserting on copy_buffer_to_arf. */
            assert(!(cm->copy_buffer_to_gf & ~0x3));

            if (cm->copy_buffer_to_gf == 1)
            {
                /* Point golden at the last-frame buffer. */
                if(cm->gld_fb_idx != cm->lst_fb_idx)
                {
                    yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
                    yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
                    cm->gld_fb_idx = cm->lst_fb_idx;

                    cpi->current_ref_frames[GOLDEN_FRAME] =
                        cpi->current_ref_frames[LAST_FRAME];
                }
            }
            else /* if (cm->copy_buffer_to_gf == 2) */
            {
                /* Point golden at the alt-ref buffer. */
                if(cm->alt_fb_idx != cm->gld_fb_idx)
                {
                    yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
                    yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
                    cm->gld_fb_idx = cm->alt_fb_idx;

                    cpi->current_ref_frames[GOLDEN_FRAME] =
                        cpi->current_ref_frames[ALTREF_FRAME];
                }
            }
        }
    }

    if (cm->refresh_last_frame)
    {
        cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
        cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
        cm->lst_fb_idx = cm->new_fb_idx;

        cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
    }

#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity)
    {
        /* we shouldn't have to keep multiple copies as we know in advance which
         * buffer we should start - for now to get something up and running
         * I've chosen to copy the buffers
         */
        if (cm->frame_type == KEY_FRAME)
        {
            int i;
            for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
                vp8_yv12_copy_frame(cpi->Source,
                                    &cpi->denoiser.yv12_running_avg[i]);
        }
        else /* For non key frames */
        {
            vp8_yv12_extend_frame_borders(
                    &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);

            if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
            {
                vp8_yv12_copy_frame(
                        &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                        &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
            }
            if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
            {
                vp8_yv12_copy_frame(
                        &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                        &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
            }
            if(cm->refresh_last_frame)
            {
                vp8_yv12_copy_frame(
                        &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                        &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
            }
        }
        if (cpi->oxcf.noise_sensitivity == 4)
            vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);

    }
#endif

}
3361
/* Sampled mean square difference between the Y planes of |source| and
 * |dest|.  Samples 16x16 blocks on a grid that skips every |skip|
 * macroblocks in both directions, and only counts blocks that have been
 * coded as zero_last for at least |min_consec_zero_last| frames in a row.
 * Returns the average per-block MSE, or 0 when fewer than ~1/16 of the
 * frame's blocks were sampled (not enough data for an estimate).
 */
static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
                                       YV12_BUFFER_CONFIG *dest,
                                       VP8_COMP *cpi)
{
    const int skip = 2;
    const int min_consec_zero_last = 10;
    const int tot_num_blocks = (source->y_height * source->y_width) >> 8;
    unsigned char *src_row = source->y_buffer;
    unsigned char *dst_row = dest->y_buffer;
    int total = 0;
    int sampled_blocks = 0;
    int row, col;

    /* Walk the Y plane every |skip| macroblocks along rows and columns,
     * summing the square differences for qualifying blocks.
     */
    for (row = 0; row < source->y_height; row += 16 * skip)
    {
        int mb_index_base = (row >> 4) * cpi->common.mb_cols;

        for (col = 0; col < source->y_width; col += 16 * skip)
        {
            int mb_index = mb_index_base + (col >> 4);

            if (cpi->consec_zero_last[mb_index] >= min_consec_zero_last) {
                unsigned int sse;
                total += vpx_mse16x16(src_row + col,
                                      source->y_stride,
                                      dst_row + col, dest->y_stride,
                                      &sse);
                sampled_blocks++;
            }
        }
        src_row += 16 * skip * source->y_stride;
        dst_row += 16 * skip * dest->y_stride;
    }

    // Only return non-zero if we have at least ~1/16 samples for estimate.
    if (sampled_blocks > (tot_num_blocks >> 4)) {
        return (total / sampled_blocks);
    } else {
        return 0;
    }
}
3404
3405 #if CONFIG_TEMPORAL_DENOISING
/* Adaptive temporal-denoiser mode selection.  Accumulates a normalized
 * mean square source difference (nmse) over blocks that have been coded
 * as ZEROMV on LAST for a run of frames; once num_mode_change samples
 * have been collected, switches the denoiser between normal
 * (kDenoiserOnYUV) and aggressive (kDenoiserOnYUVAggressive) modes based
 * on the nmse, the average QP and the target bitrate, then resets the
 * sampling interval.
 */
static void process_denoiser_mode_change(VP8_COMP *cpi) {
  const VP8_COMMON *const cm = &cpi->common;
  int i, j;
  int total = 0;
  int num_blocks = 0;
  // Number of blocks skipped along row/column in computing the
  // nmse (normalized mean square error) of source.
  int skip = 2;
  // Only select blocks for computing nmse that have been encoded
  // as ZERO LAST min_consec_zero_last frames in a row.
  // Scale with number of temporal layers.
  int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
  // Decision is tested for changing the denoising mode every
  // num_mode_change times this function is called. Note that this
  // function called every 8 frames, so (8 * num_mode_change) is number
  // of frames where denoising mode change is tested for switch.
  int num_mode_change = 20;
  // Framerate factor, to compensate for larger mse at lower framerates.
  // Use ref_framerate, which is full source framerate for temporal layers.
  // TODO(marpan): Adjust this factor.
  int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
  int tot_num_blocks = cm->mb_rows * cm->mb_cols;
  int ystride = cpi->Source->y_stride;
  unsigned char *src = cpi->Source->y_buffer;
  unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
  // Flat mid-grey block used to measure each block's contrast: the
  // variance of the source against this constant block.
  static const unsigned char const_source[16] = {
      128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
      128, 128, 128};
  int bandwidth = (int)(cpi->target_bandwidth);
  // For temporal layers, use full bandwidth (top layer).
  if (cpi->oxcf.number_of_layers > 1) {
    LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
    bandwidth = (int)(lc->target_bandwidth);
  }
  // Loop through the Y plane, every skip blocks along rows and columns,
  // summing the normalized mean square error, only for blocks that have
  // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
  // a row and have small sum difference between current and previous frame.
  // Normalization here is by the contrast of the current frame block.
  for (i = 0; i < cm->Height; i += 16 * skip) {
    int block_index_row = (i >> 4) * cm->mb_cols;
    for (j = 0; j < cm->Width; j += 16 * skip) {
      int index = block_index_row + (j >> 4);
      if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
        unsigned int sse;
        const unsigned int var = vpx_variance16x16(src + j,
                                                   ystride,
                                                   dst + j,
                                                   ystride,
                                                   &sse);
        // Only consider this block as valid for noise measurement
        // if the sum_diff average of the current and previous frame
        // is small (to avoid effects from lighting change).
        if ((sse - var) < 128) {
          unsigned int sse2;
          // NOTE(review): stride 0 re-reads the same 16 constant bytes for
          // every row of the block — confirm against the variance kernel.
          const unsigned int act = vpx_variance16x16(src + j,
                                                     ystride,
                                                     const_source,
                                                     0,
                                                     &sse2);
          if (act > 0)
            total += sse / act;
          num_blocks++;
        }
      }
    }
    src += 16 * skip * ystride;
    dst += 16 * skip * ystride;
  }
  total = total * fac_framerate / 100;

  // Only consider this frame as valid sample if we have computed nmse over
  // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
  // application inputs duplicate frames, or contrast is all zero).
  if (total > 0 &&
      (num_blocks > (tot_num_blocks >> 4))) {
    // Update the recursive mean square source_diff.
    total = (total << 8) / num_blocks;
    if (cpi->denoiser.nmse_source_diff_count == 0) {
      // First sample in new interval.
      cpi->denoiser.nmse_source_diff = total;
      cpi->denoiser.qp_avg = cm->base_qindex;
    } else {
      // For subsequent samples, use average with weight ~1/4 for new sample.
      cpi->denoiser.nmse_source_diff = (int)((total +
          3 * cpi->denoiser.nmse_source_diff) >> 2);
      cpi->denoiser.qp_avg = (int)((cm->base_qindex +
          3 * cpi->denoiser.qp_avg) >> 2);
    }
    cpi->denoiser.nmse_source_diff_count++;
  }
  // Check for changing the denoiser mode, when we have obtained #samples =
  // num_mode_change. Condition the change also on the bitrate and QP.
  if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
    // Check for going up: from normal to aggressive mode.
    if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
        (cpi->denoiser.nmse_source_diff >
        cpi->denoiser.threshold_aggressive_mode) &&
        (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
         bandwidth > cpi->denoiser.bitrate_threshold)) {
      vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
    } else {
      // Check for going down: from aggressive to normal mode.
      if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
          (cpi->denoiser.nmse_source_diff <
          cpi->denoiser.threshold_aggressive_mode)) ||
          ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
          (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
           bandwidth < cpi->denoiser.bitrate_threshold))) {
        vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
      }
    }
    // Reset metric and counter for next interval.
    cpi->denoiser.nmse_source_diff = 0;
    cpi->denoiser.qp_avg = 0;
    cpi->denoiser.nmse_source_diff_count = 0;
  }
}
3524 #endif
3525
/* Choose the loop filter level for the frame just encoded and, when the
 * frame updates at least one reference buffer, apply the loop filter to
 * the reconstruction.  Finally extends the borders of the frame to show.
 * In multi-threaded builds, posts h_event_end_lpf once filter_level is
 * chosen so the main thread can proceed.
 */
void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
{
    const FRAME_TYPE frame_type = cm->frame_type;

    /* If the frame refreshes no reference buffer, its filtered output is
     * never used for prediction, so filtering can be skipped below.
     */
    int update_any_ref_buffers = 1;
    if (cpi->common.refresh_last_frame == 0 &&
        cpi->common.refresh_golden_frame == 0 &&
        cpi->common.refresh_alt_ref_frame == 0) {
      update_any_ref_buffers = 0;
    }

    if (cm->no_lpf)
    {
        cm->filter_level = 0;
    }
    else
    {
        struct vpx_usec_timer timer;

        /* Clear down mmx registers before the level search. */
        vp8_clear_system_state();

        vpx_usec_timer_start(&timer);
        /* auto_filter == 0 selects the fast level picker; otherwise the
         * full search is used.
         */
        if (cpi->sf.auto_filter == 0) {
#if CONFIG_TEMPORAL_DENOISING
          if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
            // Use the denoised buffer for selecting base loop filter level.
            // Denoised signal for current frame is stored in INTRA_FRAME.
            // No denoising on key frames.
            vp8cx_pick_filter_level_fast(
                &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
          } else {
            vp8cx_pick_filter_level_fast(cpi->Source, cpi);
          }
#else
          vp8cx_pick_filter_level_fast(cpi->Source, cpi);
#endif
        } else {
#if CONFIG_TEMPORAL_DENOISING
          if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
            // Use the denoised buffer for selecting base loop filter level.
            // Denoised signal for current frame is stored in INTRA_FRAME.
            // No denoising on key frames.
            vp8cx_pick_filter_level(
                &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
          } else {
            vp8cx_pick_filter_level(cpi->Source, cpi);
          }
#else
          vp8cx_pick_filter_level(cpi->Source, cpi);
#endif
        }


        if (cm->filter_level > 0)
        {
            vp8cx_set_alt_lf_level(cpi, cm->filter_level);
        }

        vpx_usec_timer_mark(&timer);
        cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
    }

#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded)
        sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
#endif

    // No need to apply loop-filter if the encoded frame does not update
    // any reference buffers.
    if (cm->filter_level > 0 && update_any_ref_buffers)
    {
        vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
    }

    vp8_yv12_extend_frame_borders(cm->frame_to_show);

}
3603
encode_frame_to_data_rate(VP8_COMP * cpi,unsigned long * size,unsigned char * dest,unsigned char * dest_end,unsigned int * frame_flags)3604 static void encode_frame_to_data_rate
3605 (
3606 VP8_COMP *cpi,
3607 unsigned long *size,
3608 unsigned char *dest,
3609 unsigned char* dest_end,
3610 unsigned int *frame_flags
3611 )
3612 {
3613 int Q;
3614 int frame_over_shoot_limit;
3615 int frame_under_shoot_limit;
3616
3617 int Loop = 0;
3618 int loop_count;
3619
3620 VP8_COMMON *cm = &cpi->common;
3621 int active_worst_qchanged = 0;
3622
3623 #if !(CONFIG_REALTIME_ONLY)
3624 int q_low;
3625 int q_high;
3626 int zbin_oq_high;
3627 int zbin_oq_low = 0;
3628 int top_index;
3629 int bottom_index;
3630 int overshoot_seen = 0;
3631 int undershoot_seen = 0;
3632 #endif
3633
3634 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3635 cpi->oxcf.optimal_buffer_level / 100);
3636 int drop_mark75 = drop_mark * 2 / 3;
3637 int drop_mark50 = drop_mark / 4;
3638 int drop_mark25 = drop_mark / 8;
3639
3640
3641 /* Clear down mmx registers to allow floating point in what follows */
3642 vp8_clear_system_state();
3643
3644 #if CONFIG_MULTITHREAD
3645 /* wait for the last picture loopfilter thread done */
3646 if (cpi->b_lpf_running)
3647 {
3648 sem_wait(&cpi->h_event_end_lpf);
3649 cpi->b_lpf_running = 0;
3650 }
3651 #endif
3652
3653 if(cpi->force_next_frame_intra)
3654 {
3655 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3656 cpi->force_next_frame_intra = 0;
3657 }
3658
3659 /* For an alt ref frame in 2 pass we skip the call to the second pass
3660 * function that sets the target bandwidth
3661 */
3662 #if !(CONFIG_REALTIME_ONLY)
3663
3664 if (cpi->pass == 2)
3665 {
3666 if (cpi->common.refresh_alt_ref_frame)
3667 {
3668 /* Per frame bit target for the alt ref frame */
3669 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3670 /* per second target bitrate */
3671 cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
3672 cpi->output_framerate);
3673 }
3674 }
3675 else
3676 #endif
3677 cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate);
3678
3679 /* Default turn off buffer to buffer copying */
3680 cm->copy_buffer_to_gf = 0;
3681 cm->copy_buffer_to_arf = 0;
3682
3683 /* Clear zbin over-quant value and mode boost values. */
3684 cpi->mb.zbin_over_quant = 0;
3685 cpi->mb.zbin_mode_boost = 0;
3686
3687 /* Enable or disable mode based tweaking of the zbin
3688 * For 2 Pass Only used where GF/ARF prediction quality
3689 * is above a threshold
3690 */
3691 cpi->mb.zbin_mode_boost_enabled = 1;
3692 if (cpi->pass == 2)
3693 {
3694 if ( cpi->gfu_boost <= 400 )
3695 {
3696 cpi->mb.zbin_mode_boost_enabled = 0;
3697 }
3698 }
3699
3700 /* Current default encoder behaviour for the altref sign bias */
3701 if (cpi->source_alt_ref_active)
3702 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3703 else
3704 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3705
3706 /* Check to see if a key frame is signaled
3707 * For two pass with auto key frame enabled cm->frame_type may already
3708 * be set, but not for one pass.
3709 */
3710 if ((cm->current_video_frame == 0) ||
3711 (cm->frame_flags & FRAMEFLAGS_KEY) ||
3712 (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
3713 {
3714 /* Key frame from VFW/auto-keyframe/first frame */
3715 cm->frame_type = KEY_FRAME;
3716 #if CONFIG_TEMPORAL_DENOISING
3717 if (cpi->oxcf.noise_sensitivity == 4) {
3718 // For adaptive mode, reset denoiser to normal mode on key frame.
3719 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3720 }
3721 #endif
3722 }
3723
3724 #if CONFIG_MULTI_RES_ENCODING
3725 if (cpi->oxcf.mr_total_resolutions > 1) {
3726 LOWER_RES_FRAME_INFO* low_res_frame_info
3727 = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
3728
3729 if (cpi->oxcf.mr_encoder_id) {
3730
3731 // TODO(marpan): This constraint shouldn't be needed, as we would like
3732 // to allow for key frame setting (forced or periodic) defined per
3733 // spatial layer. For now, keep this in.
3734 cm->frame_type = low_res_frame_info->frame_type;
3735
3736 // Check if lower resolution is available for motion vector reuse.
3737 if(cm->frame_type != KEY_FRAME)
3738 {
3739 cpi->mr_low_res_mv_avail = 1;
3740 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3741
3742 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3743 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
3744 == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3745
3746 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3747 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
3748 == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3749
3750 // Don't use altref to determine whether low res is available.
3751 // TODO (marpan): Should we make this type of condition on a
3752 // per-reference frame basis?
3753 /*
3754 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3755 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3756 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3757 */
3758 }
3759 }
3760
3761 // On a key frame: For the lowest resolution, keep track of the key frame
3762 // counter value. For the higher resolutions, reset the current video
3763 // frame counter to that of the lowest resolution.
3764 // This is done to the handle the case where we may stop/start encoding
3765 // higher layer(s). The restart-encoding of higher layer is only signaled
3766 // by a key frame for now.
3767 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3768 if (cm->frame_type == KEY_FRAME) {
3769 if (cpi->oxcf.mr_encoder_id) {
3770 // If the initial starting value of the buffer level is zero (this can
3771 // happen because we may have not started encoding this higher stream),
3772 // then reset it to non-zero value based on |starting_buffer_level|.
3773 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3774 unsigned int i;
3775 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3776 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3777 for (i = 0; i < cpi->oxcf.number_of_layers; i++) {
3778 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3779 lc->bits_off_target = lc->starting_buffer_level;
3780 lc->buffer_level = lc->starting_buffer_level;
3781 }
3782 }
3783 cpi->common.current_video_frame =
3784 low_res_frame_info->key_frame_counter_value;
3785 } else {
3786 low_res_frame_info->key_frame_counter_value =
3787 cpi->common.current_video_frame;
3788 }
3789 }
3790
3791 }
3792 #endif
3793
3794 // Find the reference frame closest to the current frame.
3795 cpi->closest_reference_frame = LAST_FRAME;
3796 if(cm->frame_type != KEY_FRAME) {
3797 int i;
3798 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3799 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3800 closest_ref = LAST_FRAME;
3801 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3802 closest_ref = GOLDEN_FRAME;
3803 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3804 closest_ref = ALTREF_FRAME;
3805 }
3806 for(i = 1; i <= 3; i++) {
3807 vpx_ref_frame_type_t ref_frame_type = (vpx_ref_frame_type_t)
3808 ((i == 3) ? 4 : i);
3809 if (cpi->ref_frame_flags & ref_frame_type) {
3810 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3811 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3812 closest_ref = i;
3813 }
3814 }
3815 }
3816 cpi->closest_reference_frame = closest_ref;
3817 }
3818
3819 /* Set various flags etc to special state if it is a key frame */
3820 if (cm->frame_type == KEY_FRAME)
3821 {
3822 int i;
3823
3824 // Set the loop filter deltas and segmentation map update
3825 setup_features(cpi);
3826
3827 /* The alternate reference frame cannot be active for a key frame */
3828 cpi->source_alt_ref_active = 0;
3829
3830 /* Reset the RD threshold multipliers to default of * 1 (128) */
3831 for (i = 0; i < MAX_MODES; i++)
3832 {
3833 cpi->mb.rd_thresh_mult[i] = 128;
3834 }
3835
3836 // Reset the zero_last counter to 0 on key frame.
3837 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3838 memset(cpi->consec_zero_last_mvbias, 0,
3839 (cpi->common.mb_rows * cpi->common.mb_cols));
3840 }
3841
3842 #if 0
3843 /* Experimental code for lagged compress and one pass
3844 * Initialise one_pass GF frames stats
3845 * Update stats used for GF selection
3846 */
3847 {
3848 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3849
3850 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3851 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3852 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3853 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3854 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3855 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3856 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3857 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3858 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3859 }
3860 #endif
3861
3862 update_rd_ref_frame_probs(cpi);
3863
3864 if (cpi->drop_frames_allowed)
3865 {
3866 /* The reset to decimation 0 is only done here for one pass.
3867 * Once it is set two pass leaves decimation on till the next kf.
3868 */
3869 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
3870 cpi->decimation_factor --;
3871
3872 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0)
3873 cpi->decimation_factor = 1;
3874
3875 else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3))
3876 {
3877 cpi->decimation_factor = 3;
3878 }
3879 else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2))
3880 {
3881 cpi->decimation_factor = 2;
3882 }
3883 else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1))
3884 {
3885 cpi->decimation_factor = 1;
3886 }
3887 }
3888
3889 /* The following decimates the frame rate according to a regular
3890 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3891 * prevent buffer under-run in CBR mode. Alternatively it might be
3892 * desirable in some situations to drop frame rate but throw more bits
3893 * at each frame.
3894 *
3895 * Note that dropping a key frame can be problematic if spatial
3896 * resampling is also active
3897 */
3898 if (cpi->decimation_factor > 0)
3899 {
3900 switch (cpi->decimation_factor)
3901 {
3902 case 1:
3903 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3904 break;
3905 case 2:
3906 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3907 break;
3908 case 3:
3909 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3910 break;
3911 }
3912
3913 /* Note that we should not throw out a key frame (especially when
3914 * spatial resampling is enabled).
3915 */
3916 if (cm->frame_type == KEY_FRAME)
3917 {
3918 cpi->decimation_count = cpi->decimation_factor;
3919 }
3920 else if (cpi->decimation_count > 0)
3921 {
3922 cpi->decimation_count --;
3923
3924 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3925 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
3926 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3927
3928 #if CONFIG_MULTI_RES_ENCODING
3929 vp8_store_drop_frame_info(cpi);
3930 #endif
3931
3932 cm->current_video_frame++;
3933 cpi->frames_since_key++;
3934 // We advance the temporal pattern for dropped frames.
3935 cpi->temporal_pattern_counter++;
3936
3937 #if CONFIG_INTERNAL_STATS
3938 cpi->count ++;
3939 #endif
3940
3941 cpi->buffer_level = cpi->bits_off_target;
3942
3943 if (cpi->oxcf.number_of_layers > 1)
3944 {
3945 unsigned int i;
3946
3947 /* Propagate bits saved by dropping the frame to higher
3948 * layers
3949 */
3950 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
3951 {
3952 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3953 lc->bits_off_target += (int)(lc->target_bandwidth /
3954 lc->framerate);
3955 if (lc->bits_off_target > lc->maximum_buffer_size)
3956 lc->bits_off_target = lc->maximum_buffer_size;
3957 lc->buffer_level = lc->bits_off_target;
3958 }
3959 }
3960
3961 return;
3962 }
3963 else
3964 cpi->decimation_count = cpi->decimation_factor;
3965 }
3966 else
3967 cpi->decimation_count = 0;
3968
3969 /* Decide how big to make the frame */
3970 if (!vp8_pick_frame_size(cpi))
3971 {
3972 /*TODO: 2 drop_frame and return code could be put together. */
3973 #if CONFIG_MULTI_RES_ENCODING
3974 vp8_store_drop_frame_info(cpi);
3975 #endif
3976 cm->current_video_frame++;
3977 cpi->frames_since_key++;
3978 // We advance the temporal pattern for dropped frames.
3979 cpi->temporal_pattern_counter++;
3980 return;
3981 }
3982
    /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
     * This has a knock on effect on active best quality as well.
     * For CBR if the buffer reaches its maximum level then we can no longer
     * save up bits for later frames so we might as well use them up
     * on the current frame.
     */
    if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
        (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
    {
        /* Max adjustment is 1/4 */
        int Adjustment = cpi->active_worst_quality / 4;

        if (Adjustment)
        {
            int buff_lvl_step;

            if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size)
            {
                /* Scale the adjustment by how far the buffer sits between
                 * the optimal and maximum levels.
                 */
                buff_lvl_step = (int)
                                ((cpi->oxcf.maximum_buffer_size -
                                  cpi->oxcf.optimal_buffer_level) /
                                  Adjustment);

                if (buff_lvl_step)
                    Adjustment = (int)
                                 ((cpi->buffer_level -
                                   cpi->oxcf.optimal_buffer_level) /
                                   buff_lvl_step);
                else
                    Adjustment = 0;
            }

            cpi->active_worst_quality -= Adjustment;

            /* Never let the active worst drop below the active best. */
            if(cpi->active_worst_quality < cpi->active_best_quality)
                cpi->active_worst_quality = cpi->active_best_quality;
        }
    }
4021
    /* Set an active best quality and if necessary active worst quality
     * There is some odd behavior for one pass here that needs attention.
     */
    if ( (cpi->pass == 2) || (cpi->ni_frames > 150))
    {
        vp8_clear_system_state();

        Q = cpi->active_worst_quality;

        if ( cm->frame_type == KEY_FRAME )
        {
            if ( cpi->pass == 2 )
            {
                /* Map the ambient Q through a key-frame minimum-Q table;
                 * low-motion key frames (high gfu_boost) use the lower table.
                 */
                if (cpi->gfu_boost > 600)
                    cpi->active_best_quality = kf_low_motion_minq[Q];
                else
                    cpi->active_best_quality = kf_high_motion_minq[Q];

                /* Special case for key frames forced because we have reached
                 * the maximum key frame interval. Here force the Q to a range
                 * based on the ambient Q to reduce the risk of popping
                 */
                if ( cpi->this_key_frame_forced )
                {
                    if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8)
                        cpi->active_best_quality = cpi->avg_frame_qindex * 7/8;
                    else if ( cpi->active_best_quality < cpi->avg_frame_qindex >> 2 )
                        cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
                }
            }
            /* One pass more conservative */
            else
                cpi->active_best_quality = kf_high_motion_minq[Q];
        }

        else if (cpi->oxcf.number_of_layers==1 &&
                (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame))
        {
            /* Use the lower of cpi->active_worst_quality and recent
             * average Q as basis for GF/ARF Q limit unless last frame was
             * a key frame.
             */
            if ( (cpi->frames_since_key > 1) &&
               (cpi->avg_frame_qindex < cpi->active_worst_quality) )
            {
                Q = cpi->avg_frame_qindex;
            }

            /* For constrained quality don't allow Q less than the cq level */
            if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
                 (Q < cpi->cq_target_quality) )
            {
                Q = cpi->cq_target_quality;
            }

            if ( cpi->pass == 2 )
            {
                /* Pick the GF/ARF minimum-Q table by motion level, again
                 * inferred from gfu_boost.
                 */
                if ( cpi->gfu_boost > 1000 )
                    cpi->active_best_quality = gf_low_motion_minq[Q];
                else if ( cpi->gfu_boost < 400 )
                    cpi->active_best_quality = gf_high_motion_minq[Q];
                else
                    cpi->active_best_quality = gf_mid_motion_minq[Q];

                /* Constrained quality use slightly lower active best. */
                if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
                {
                    cpi->active_best_quality =
                        cpi->active_best_quality * 15/16;
                }
            }
            /* One pass more conservative */
            else
                cpi->active_best_quality = gf_high_motion_minq[Q];
        }
        else
        {
            cpi->active_best_quality = inter_minq[Q];

            /* For the constant/constrained quality mode we don't want
             * q to fall below the cq level.
             */
            if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
                (cpi->active_best_quality < cpi->cq_target_quality) )
            {
                /* If we are strongly undershooting the target rate in the last
                 * frames then use the user passed in cq value not the auto
                 * cq value.
                 */
                if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
                    cpi->active_best_quality = cpi->oxcf.cq_level;
                else
                    cpi->active_best_quality = cpi->cq_target_quality;
            }
        }

        /* If CBR and the buffer is as full then it is reasonable to allow
         * higher quality on the frames to prevent bits just going to waste.
         */
        if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
        {
            /* Note that the use of >= here eliminates the risk of a divide
             * by 0 error in the else if clause
             */
            if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
                cpi->active_best_quality = cpi->best_quality;

            else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
            {
                /* Move active best towards best_quality in proportion to
                 * the fraction of headroom consumed above the optimal
                 * buffer level (Fraction is in 1/128 units).
                 */
                int Fraction = (int)
                  (((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128)
                   / (cpi->oxcf.maximum_buffer_size -
                       cpi->oxcf.optimal_buffer_level));
                int min_qadjustment = ((cpi->active_best_quality -
                                        cpi->best_quality) * Fraction) / 128;

                cpi->active_best_quality -= min_qadjustment;
            }
        }
    }
    /* Make sure constrained quality mode limits are adhered to for the first
     * few frames of one pass encodes
     */
    else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
    {
        if ( (cm->frame_type == KEY_FRAME) ||
             cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame )
        {
            cpi->active_best_quality = cpi->best_quality;
        }
        else if (cpi->active_best_quality < cpi->cq_target_quality)
        {
            cpi->active_best_quality = cpi->cq_target_quality;
        }
    }

    /* Clip the active best and worst quality values to limits */
    if (cpi->active_worst_quality > cpi->worst_quality)
        cpi->active_worst_quality = cpi->worst_quality;

    if (cpi->active_best_quality < cpi->best_quality)
        cpi->active_best_quality = cpi->best_quality;

    /* Worst must never be better (lower) than best. */
    if ( cpi->active_worst_quality < cpi->active_best_quality )
        cpi->active_worst_quality = cpi->active_best_quality;
4167
    /* Determine initial Q to try */
    Q = vp8_regulate_q(cpi, cpi->this_frame_target);

#if !(CONFIG_REALTIME_ONLY)

    /* Set highest allowed value for Zbin over quant: none on key frames,
     * a small amount on single-layer GF/ARF updates, otherwise the maximum.
     */
    if (cm->frame_type == KEY_FRAME)
        zbin_oq_high = 0;
    else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame ||
              (cm->refresh_golden_frame && !cpi->source_alt_ref_active))))
    {
        zbin_oq_high = 16;
    }
    else
        zbin_oq_high = ZBIN_OQ_MAX;
#endif
4184
    /* Setup background Q adjustment for error resilient mode.
     * For multi-layer encodes only enable this for the base layer.
     */
    if (cpi->cyclic_refresh_mode_enabled)
    {
        // Special case for screen_content_mode with golden frame updates.
        int disable_cr_gf = (cpi->oxcf.screen_content_mode == 2 &&
                             cm->refresh_golden_frame);
        if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf)
            cyclic_background_refresh(cpi, Q, 0);
        else
            disable_segmentation(cpi);
    }

    vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);

#if !(CONFIG_REALTIME_ONLY)
    /* Limit Q range for the adaptive loop. */
    bottom_index = cpi->active_best_quality;
    top_index = cpi->active_worst_quality;
    q_low = cpi->active_best_quality;
    q_high = cpi->active_worst_quality;
#endif

    /* Snapshot coding state so the recode loop below can roll back. */
    vp8_save_coding_context(cpi);

    loop_count = 0;

    scale_and_extend_source(cpi->un_scaled_source, cpi);

#if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
    // Option to apply spatial blur under the aggressive or adaptive
    // (temporal denoising) mode.
    if (cpi->oxcf.noise_sensitivity >= 3) {
        if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
            vp8_de_noise(cm, cpi->Source, cpi->Source,
                         cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
        }
    }
#endif
4225
#if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)

    if (cpi->oxcf.noise_sensitivity > 0)
    {
        unsigned char *src;
        int l = 0;

        /* Map noise_sensitivity (1..6) to a denoise filter strength. */
        switch (cpi->oxcf.noise_sensitivity)
        {
        case 1:
            l = 20;
            break;
        case 2:
            l = 40;
            break;
        case 3:
            l = 60;
            break;
        case 4:
            l = 80;
            break;
        case 5:
            l = 100;
            break;
        case 6:
            l = 150;
            break;
        }


        if (cm->frame_type == KEY_FRAME)
        {
            vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1);
        }
        else
        {
            vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1);

            src = cpi->Source->y_buffer;

            /* NOTE(review): src is computed here but not used in the visible
             * code that follows -- possibly leftover from removed
             * pre-processing; confirm against the full file before removing.
             */
            if (cpi->Source->y_stride < 0)
            {
                src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
            }
        }
    }

#endif


#ifdef OUTPUT_YUV_SRC
    vp8_write_yuv_frame(yuv_file, cpi->Source);
#endif
4279
    /* Recode loop: quantize/encode the frame, then possibly adjust Q and
     * retry until the projected size is acceptable.
     */
    do
    {
        vp8_clear_system_state();

        vp8_set_quantizer(cpi, Q);

        /* setup skip prob for costing in mode/mv decision */
        if (cpi->common.mb_no_coeff_skip)
        {
            cpi->prob_skip_false = cpi->base_skip_false_prob[Q];

            if (cm->frame_type != KEY_FRAME)
            {
                /* Prefer the skip-false probability recorded the last time a
                 * frame of the same reference-update type was coded.
                 */
                if (cpi->common.refresh_alt_ref_frame)
                {
                    if (cpi->last_skip_false_probs[2] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[2];

                    /*
                    if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[2];
                    else if (cpi->last_skip_false_probs[2]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2;
                    */
                }
                else if (cpi->common.refresh_golden_frame)
                {
                    if (cpi->last_skip_false_probs[1] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[1];

                    /*
                    if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[1];
                    else if (cpi->last_skip_false_probs[1]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2;
                    */
                }
                else
                {
                    if (cpi->last_skip_false_probs[0] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[0];

                    /*
                    if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[0];
                    else if(cpi->last_skip_false_probs[0]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2;
                    */
                }

                /* as this is for cost estimate, let's make sure it does not
                 * go extreme either way
                 */
                if (cpi->prob_skip_false < 5)
                    cpi->prob_skip_false = 5;

                if (cpi->prob_skip_false > 250)
                    cpi->prob_skip_false = 250;

                if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref)
                    cpi->prob_skip_false = 1;
            }

#if 0

            if (cpi->pass != 1)
            {
                FILE *f = fopen("skip.stt", "a");
                fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
                fclose(f);
            }

#endif

        }

        if (cm->frame_type == KEY_FRAME)
        {
            if(resize_key_frame(cpi))
            {
                /* If the frame size has changed, need to reset Q, quantizer,
                 * and background refresh.
                 */
                Q = vp8_regulate_q(cpi, cpi->this_frame_target);
                if (cpi->cyclic_refresh_mode_enabled)
                {
                    if (cpi->current_layer==0)
                        cyclic_background_refresh(cpi, Q, 0);
                    else
                        disable_segmentation(cpi);
                }
                // Reset the zero_last counter to 0 on key frame.
                memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
                memset(cpi->consec_zero_last_mvbias, 0,
                       (cpi->common.mb_rows * cpi->common.mb_cols));
                vp8_set_quantizer(cpi, Q);
            }

            vp8_setup_key_frame(cpi);
        }
4380
4381
4382
4383 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
4384 {
4385 if(cpi->oxcf.error_resilient_mode)
4386 cm->refresh_entropy_probs = 0;
4387
4388 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
4389 {
4390 if (cm->frame_type == KEY_FRAME)
4391 cm->refresh_entropy_probs = 1;
4392 }
4393
4394 if (cm->refresh_entropy_probs == 0)
4395 {
4396 /* save a copy for later refresh */
4397 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
4398 }
4399
4400 vp8_update_coef_context(cpi);
4401
4402 vp8_update_coef_probs(cpi);
4403
4404 /* transform / motion compensation build reconstruction frame
4405 * +pack coef partitions
4406 */
4407 vp8_encode_frame(cpi);
4408
4409 /* cpi->projected_frame_size is not needed for RT mode */
4410 }
4411 #else
4412 /* transform / motion compensation build reconstruction frame */
4413 vp8_encode_frame(cpi);
4414
4415 if (cpi->oxcf.screen_content_mode == 2) {
4416 if (vp8_drop_encodedframe_overshoot(cpi, Q))
4417 return;
4418 }
4419
4420 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
4421 cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
4422 #endif
        vp8_clear_system_state();

        /* Test to see if the stats generated for this frame indicate that
         * we should have coded a key frame (assuming that we didn't)!
         */

        if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME
            && cpi->compressor_speed != 2)
        {
#if !(CONFIG_REALTIME_ONLY)
            if (decide_key_frame(cpi))
            {
                /* Reset all our sizing numbers and recode */
                cm->frame_type = KEY_FRAME;

                vp8_pick_frame_size(cpi);

                /* Clear the Alt reference frame active flag when we have
                 * a key frame
                 */
                cpi->source_alt_ref_active = 0;

                // Set the loop filter deltas and segmentation map update
                setup_features(cpi);

                vp8_restore_coding_context(cpi);

                Q = vp8_regulate_q(cpi, cpi->this_frame_target);

                vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);

                /* Limit Q range for the adaptive loop. */
                bottom_index = cpi->active_best_quality;
                top_index = cpi->active_worst_quality;
                q_low = cpi->active_best_quality;
                q_high = cpi->active_worst_quality;

                loop_count++;
                Loop = 1;

                continue;
            }
#endif
        }

        vp8_clear_system_state();

        /* Guard against divide-by-zero in the overshoot percentage below. */
        if (frame_over_shoot_limit == 0)
            frame_over_shoot_limit = 1;

        /* Are we overshooting and up against the limit of active max Q. */
        if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
            (Q == cpi->active_worst_quality) &&
            (cpi->active_worst_quality < cpi->worst_quality) &&
            (cpi->projected_frame_size > frame_over_shoot_limit))
        {
            int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;

            /* If so is there any scope for relaxing it */
            while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
            {
                cpi->active_worst_quality++;
                /* Assume 1 qstep = about 4% on frame size. */
                over_size_percent = (int)(over_size_percent * 0.96);
            }
#if !(CONFIG_REALTIME_ONLY)
            top_index = cpi->active_worst_quality;
#endif
            /* If we have updated the active max Q do not call
             * vp8_update_rate_correction_factors() this loop.
             */
            active_worst_qchanged = 1;
        }
        else
            active_worst_qchanged = 0;
4498
#if !(CONFIG_REALTIME_ONLY)
        /* Special case handling for forced key frames */
        if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
        {
            int last_q = Q;
            /* Reconstruction error of the just-coded key frame. */
            int kf_err = vp8_calc_ss_err(cpi->Source,
                                         &cm->yv12_fb[cm->new_fb_idx]);

            /* The key frame is not good enough */
            if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
            {
                /* Lower q_high */
                q_high = (Q > q_low) ? (Q - 1) : q_low;

                /* Adjust Q */
                Q = (q_high + q_low) >> 1;
            }
            /* The key frame is much better than the previous frame */
            else if ( kf_err < (cpi->ambient_err >> 1) )
            {
                /* Raise q_low */
                q_low = (Q < q_high) ? (Q + 1) : q_high;

                /* Adjust Q */
                Q = (q_high + q_low + 1) >> 1;
            }

            /* Clamp Q to upper and lower limits: */
            if (Q > q_high)
                Q = q_high;
            else if (Q < q_low)
                Q = q_low;

            /* Keep recoding while the binary search is still moving Q. */
            Loop = Q != last_q;
        }
4534
        /* Is the projected frame size out of range and are we allowed
         * to attempt to recode.
         */
        else if ( recode_loop_test( cpi,
                               frame_over_shoot_limit, frame_under_shoot_limit,
                               Q, top_index, bottom_index ) )
        {
            int last_q = Q;
            int Retries = 0;

            /* Frame size out of permitted range. Update correction factor
             * & compute new Q to try...
             */

            /* Frame is too large */
            if (cpi->projected_frame_size > cpi->this_frame_target)
            {
                /* Raise Qlow as to at least the current value */
                q_low = (Q < q_high) ? (Q + 1) : q_high;

                /* If we are using over quant do the same for zbin_oq_low */
                if (cpi->mb.zbin_over_quant > 0)
                    zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
                        (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;

                if (undershoot_seen)
                {
                    /* Update rate_correction_factor unless
                     * cpi->active_worst_quality has changed.
                     */
                    if (!active_worst_qchanged)
                        vp8_update_rate_correction_factors(cpi, 1);

                    /* Both over- and undershoot seen: bisect the remaining
                     * Q range rather than re-deriving Q from the rc model.
                     */
                    Q = (q_high + q_low + 1) / 2;

                    /* Adjust cpi->zbin_over_quant (only allowed when Q
                     * is max)
                     */
                    if (Q < MAXQ)
                        cpi->mb.zbin_over_quant = 0;
                    else
                    {
                        zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
                            (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
                        cpi->mb.zbin_over_quant =
                            (zbin_oq_high + zbin_oq_low) / 2;
                    }
                }
                else
                {
                    /* Update rate_correction_factor unless
                     * cpi->active_worst_quality has changed.
                     */
                    if (!active_worst_qchanged)
                        vp8_update_rate_correction_factors(cpi, 0);

                    Q = vp8_regulate_q(cpi, cpi->this_frame_target);

                    while (((Q < q_low) ||
                        (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
                        (Retries < 10))
                    {
                        vp8_update_rate_correction_factors(cpi, 0);
                        Q = vp8_regulate_q(cpi, cpi->this_frame_target);
                        Retries ++;
                    }
                }

                overshoot_seen = 1;
            }
            /* Frame is too small */
            else
            {
                if (cpi->mb.zbin_over_quant == 0)
                    /* Lower q_high if not using over quant */
                    q_high = (Q > q_low) ? (Q - 1) : q_low;
                else
                    /* else lower zbin_oq_high */
                    zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
                        (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;

                if (overshoot_seen)
                {
                    /* Update rate_correction_factor unless
                     * cpi->active_worst_quality has changed.
                     */
                    if (!active_worst_qchanged)
                        vp8_update_rate_correction_factors(cpi, 1);

                    Q = (q_high + q_low) / 2;

                    /* Adjust cpi->zbin_over_quant (only allowed when Q
                     * is max)
                     */
                    if (Q < MAXQ)
                        cpi->mb.zbin_over_quant = 0;
                    else
                        cpi->mb.zbin_over_quant =
                            (zbin_oq_high + zbin_oq_low) / 2;
                }
                else
                {
                    /* Update rate_correction_factor unless
                     * cpi->active_worst_quality has changed.
                     */
                    if (!active_worst_qchanged)
                        vp8_update_rate_correction_factors(cpi, 0);

                    Q = vp8_regulate_q(cpi, cpi->this_frame_target);

                    /* Special case reset for qlow for constrained quality.
                     * This should only trigger where there is very substantial
                     * undershoot on a frame and the auto cq level is above
                     * the user passed in value.
                     */
                    if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
                         (Q < q_low) )
                    {
                        q_low = Q;
                    }

                    while (((Q > q_high) ||
                        (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
                        (Retries < 10))
                    {
                        vp8_update_rate_correction_factors(cpi, 0);
                        Q = vp8_regulate_q(cpi, cpi->this_frame_target);
                        Retries ++;
                    }
                }

                undershoot_seen = 1;
            }

            /* Clamp Q to upper and lower limits: */
            if (Q > q_high)
                Q = q_high;
            else if (Q < q_low)
                Q = q_low;

            /* Clamp cpi->zbin_over_quant */
            cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
                zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
                    zbin_oq_high : cpi->mb.zbin_over_quant;

            Loop = Q != last_q;
        }
        else
#endif
            Loop = 0;

        /* Never recode frames that come from an alt-ref source. */
        if (cpi->is_src_frame_alt_ref)
            Loop = 0;

        if (Loop == 1)
        {
            /* Roll the coding state back and go around again. */
            vp8_restore_coding_context(cpi);
            loop_count++;
#if CONFIG_INTERNAL_STATS
            cpi->tot_recode_hits++;
#endif
        }
    }
    while (Loop == 1);
4699
#if 0
    /* Experimental code for lagged and one pass
     * Update stats used for one pass GF selection
     */
    {
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
    }
#endif

    /* Special case code to reduce pulsing when key frames are forced at a
     * fixed interval. Note the reconstruction error if it is the frame before
     * the force key frame
     */
    if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
    {
        cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
                                           &cm->yv12_fb[cm->new_fb_idx]);
    }

    /* This frame's MVs are saved and will be used in next frame's MV predictor.
     * Last frame has one more line(add to bottom) and one more column(add to
     * right) than cm->mip. The edge elements are initialized to 0.
     */
#if CONFIG_MULTI_RES_ENCODING
    if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
#else
    if(cm->show_frame) /* do not save for altref frame */
#endif
    {
        int mb_row;
        int mb_col;
        /* Point to beginning of allocated MODE_INFO arrays. */
        MODE_INFO *tmp = cm->mip;

        if(cm->frame_type != KEY_FRAME)
        {
            for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
            {
                for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
                {
                    if(tmp->mbmi.ref_frame != INTRA_FRAME)
                        cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int;

                    cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
                    cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame;
                    tmp++;
                }
            }
        }
    }
4752
    /* Count last ref frame 0,0 usage on current encoded frame. */
    {
        int mb_row;
        int mb_col;
        /* Point to beginning of MODE_INFO arrays. */
        MODE_INFO *tmp = cm->mi;

        cpi->zeromv_count = 0;

        if(cm->frame_type != KEY_FRAME)
        {
            for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
            {
                for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
                {
                    if (tmp->mbmi.mode == ZEROMV &&
                        tmp->mbmi.ref_frame == LAST_FRAME)
                        cpi->zeromv_count++;
                    tmp++;
                }
                /* Step over the extra border column at the end of the row. */
                tmp++;
            }
        }
    }

#if CONFIG_MULTI_RES_ENCODING
    vp8_cal_dissimilarity(cpi);
#endif

    /* Update the GF usage maps.
     * This is done after completing the compression of a frame when all
     * modes etc. are finalized but before loop filter
     */
    if (cpi->oxcf.number_of_layers == 1)
        vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);

    if (cm->frame_type == KEY_FRAME)
        cm->refresh_last_frame = 1;
4791
#if 0
    {
        FILE *f = fopen("gfactive.stt", "a");
        fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
        fclose(f);
    }
#endif

    /* For inter frames the current default behavior is that when
     * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
     * This is purely an encoder decision at present.
     */
    if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
        cm->copy_buffer_to_arf = 2;
    else
        cm->copy_buffer_to_arf = 0;

    cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];

#if CONFIG_TEMPORAL_DENOISING
    // Get some measure of the amount of noise, by measuring the (partial) mse
    // between source and denoised buffer, for y channel. Partial refers to
    // computing the sse for a sub-sample of the frame (i.e., skip x blocks along row/column),
    // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
    // Do this every ~8 frames, to further reduce complexity.
    // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity < 4,
    // should be removed in favor of the process_denoiser_mode_change() function below.
    if (cpi->oxcf.noise_sensitivity > 0 &&
        cpi->oxcf.noise_sensitivity < 4 &&
        !cpi->oxcf.screen_content_mode &&
        cpi->frames_since_key%8 == 0 &&
        cm->frame_type != KEY_FRAME) {
        cpi->mse_source_denoised = measure_square_diff_partial(
            &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
    }

    // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
    // of source diff (between current and previous frame), and determine if we
    // should switch the denoiser mode. Sampling refers to computing the mse for
    // a sub-sample of the frame (i.e., skip x blocks along row/column), and
    // only for blocks in that set that have used ZEROMV LAST, along with some
    // constraint on the sum diff between blocks. This process is called every
    // ~8 frames, to further reduce complexity.
    if (cpi->oxcf.noise_sensitivity == 4 &&
        !cpi->oxcf.screen_content_mode &&
        cpi->frames_since_key % 8 == 0 &&
        cm->frame_type != KEY_FRAME) {
        process_denoiser_mode_change(cpi);
    }
#endif
4842
#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded)
    {
        /* start loopfilter in separate thread */
        sem_post(&cpi->h_event_start_lpf);
        cpi->b_lpf_running = 1;
    }
    else
#endif
    {
        vp8_loopfilter_frame(cpi, cm);
    }

    update_reference_frames(cpi);

#ifdef OUTPUT_YUV_DENOISED
    vp8_write_yuv_frame(yuv_denoised_file,
                        &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
#endif

#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    if (cpi->oxcf.error_resilient_mode)
    {
        cm->refresh_entropy_probs = 0;
    }
#endif

#if CONFIG_MULTITHREAD
    /* wait that filter_level is picked so that we can continue with stream packing */
    if (cpi->b_multi_threaded)
        sem_wait(&cpi->h_event_end_lpf);
#endif

    /* build the bitstream */
    vp8_pack_bitstream(cpi, dest, dest_end, size);

#if CONFIG_MULTITHREAD
    /* if PSNR packets are generated we have to wait for the lpf */
    if (cpi->b_lpf_running && cpi->b_calculate_psnr)
    {
        sem_wait(&cpi->h_event_end_lpf);
        cpi->b_lpf_running = 0;
    }
#endif

    /* Move storing frame_type out of the above loop since it is also
     * needed in motion search besides loopfilter */
    cm->last_frame_type = cm->frame_type;
4891
    /* Update rate control heuristics */
    cpi->total_byte_count += (*size);
    cpi->projected_frame_size = (*size) << 3;

    if (cpi->oxcf.number_of_layers > 1)
    {
        unsigned int i;
        for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
            cpi->layer_context[i].total_byte_count += (*size);
    }

    if (!active_worst_qchanged)
        vp8_update_rate_correction_factors(cpi, 2);

    cpi->last_q[cm->frame_type] = cm->base_qindex;

    if (cm->frame_type == KEY_FRAME)
    {
        vp8_adjust_key_frame_context(cpi);
    }

    /* Keep a record of ambient average Q. */
    if (cm->frame_type != KEY_FRAME)
        cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;

    /* Keep a record from which we can calculate the average Q excluding
     * GF updates and key frames
     */
    if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
        (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)))
    {
        cpi->ni_frames++;

        /* Calculate the average Q for normal inter frames (not key or GFU
         * frames).
         */
        if ( cpi->pass == 2 )
        {
            cpi->ni_tot_qi += Q;
            cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
        }
        else
        {
            /* Damp value for first few frames */
            if (cpi->ni_frames > 150 )
            {
                cpi->ni_tot_qi += Q;
                cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
            }
            /* For one pass, early in the clip ... average the current frame Q
             * value with the worstq entered by the user as a dampening measure
             */
            else
            {
                cpi->ni_tot_qi += Q;
                cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
            }

            /* If the average Q is higher than what was used in the last
             * frame (after going through the recode loop to keep the frame
             * size within range) then use the last frame value - 1. The -1
             * is designed to stop Q and hence the data rate, from
             * progressively falling away during difficult sections, but at
             * the same time reduce the number of iterations around the
             * recode loop.
             */
            if (Q > cpi->ni_av_qi)
                cpi->ni_av_qi = Q - 1;
        }
    }
4962
    /* Update the buffer level variable. */
    /* Non-viewable frames are a special case and are treated as pure overhead. */
    if ( !cm->show_frame )
        cpi->bits_off_target -= cpi->projected_frame_size;
    else
        cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;

    /* Clip the buffer level to the maximum specified buffer size */
    if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
        cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;

    // If the frame dropper is not enabled, don't let the buffer level go below
    // some threshold, given here by -|maximum_buffer_size|. For now we only do
    // this for screen content input.
    if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
        cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size)
        cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;

    /* Rolling monitors of whether we are over or underspending used to
     * help regulate min and Max Q in two pass.
     */
    cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
    cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
    cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
    cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;

    /* Actual bits spent */
    cpi->total_actual_bits += cpi->projected_frame_size;

    /* Debug stats */
    cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);

    cpi->buffer_level = cpi->bits_off_target;

    /* Propagate values to higher temporal layers */
    if (cpi->oxcf.number_of_layers > 1)
    {
        unsigned int i;

        for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
        {
            LAYER_CONTEXT *lc = &cpi->layer_context[i];
            int bits_off_for_this_layer =
               (int)(lc->target_bandwidth / lc->framerate -
                     cpi->projected_frame_size);

            lc->bits_off_target += bits_off_for_this_layer;

            /* Clip buffer level to maximum buffer size for the layer */
            if (lc->bits_off_target > lc->maximum_buffer_size)
                lc->bits_off_target = lc->maximum_buffer_size;

            lc->total_actual_bits += cpi->projected_frame_size;
            lc->total_target_vs_actual += bits_off_for_this_layer;
            lc->buffer_level = lc->bits_off_target;
        }
    }
    /* Update bits left to the kf and gf groups to account for overshoot
     * or undershoot on these frames
     */
    if (cm->frame_type == KEY_FRAME)
    {
        cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;

        if (cpi->twopass.kf_group_bits < 0)
            cpi->twopass.kf_group_bits = 0 ;
    }
    else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
    {
        cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;

        if (cpi->twopass.gf_group_bits < 0)
            cpi->twopass.gf_group_bits = 0 ;
    }

    /* Record the skip-false probability and the Q actually used, keyed by
     * which reference buffer was refreshed, for reuse on the next frame.
     */
    if (cm->frame_type != KEY_FRAME)
    {
        if (cpi->common.refresh_alt_ref_frame)
        {
            cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
            cpi->last_skip_probs_q[2] = cm->base_qindex;
        }
        else if (cpi->common.refresh_golden_frame)
        {
            cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
            cpi->last_skip_probs_q[1] = cm->base_qindex;
        }
        else
        {
            cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
            cpi->last_skip_probs_q[0] = cm->base_qindex;

            /* update the baseline */
            cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;

        }
    }
5061
5062 #if 0 && CONFIG_INTERNAL_STATS
5063 {
5064 FILE *f = fopen("tmp.stt", "a");
5065
5066 vp8_clear_system_state();
5067
5068 if (cpi->twopass.total_left_stats.coded_error != 0.0)
5069 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
5070 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
5071 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
5072 cpi->common.current_video_frame, cpi->this_frame_target,
5073 cpi->projected_frame_size,
5074 (cpi->projected_frame_size - cpi->this_frame_target),
5075 cpi->total_target_vs_actual,
5076 cpi->buffer_level,
5077 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
5078 cpi->total_actual_bits, cm->base_qindex,
5079 cpi->active_best_quality, cpi->active_worst_quality,
5080 cpi->ni_av_qi, cpi->cq_target_quality,
5081 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
5082 cm->frame_type, cpi->gfu_boost,
5083 cpi->twopass.est_max_qcorrection_factor,
5084 cpi->twopass.bits_left,
5085 cpi->twopass.total_left_stats.coded_error,
5086 (double)cpi->twopass.bits_left /
5087 cpi->twopass.total_left_stats.coded_error,
5088 cpi->tot_recode_hits);
5089 else
5090 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
5091 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
5092 "%8.2lf %"PRId64" %10.3lf %8d\n",
5093 cpi->common.current_video_frame, cpi->this_frame_target,
5094 cpi->projected_frame_size,
5095 (cpi->projected_frame_size - cpi->this_frame_target),
5096 cpi->total_target_vs_actual,
5097 cpi->buffer_level,
5098 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
5099 cpi->total_actual_bits, cm->base_qindex,
5100 cpi->active_best_quality, cpi->active_worst_quality,
5101 cpi->ni_av_qi, cpi->cq_target_quality,
5102 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
5103 cm->frame_type, cpi->gfu_boost,
5104 cpi->twopass.est_max_qcorrection_factor,
5105 cpi->twopass.bits_left,
5106 cpi->twopass.total_left_stats.coded_error,
5107 cpi->tot_recode_hits);
5108
5109 fclose(f);
5110
5111 {
5112 FILE *fmodes = fopen("Modes.stt", "a");
5113
5114 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
5115 cpi->common.current_video_frame,
5116 cm->frame_type, cm->refresh_golden_frame,
5117 cm->refresh_alt_ref_frame);
5118
5119 fprintf(fmodes, "\n");
5120
5121 fclose(fmodes);
5122 }
5123 }
5124
5125 #endif
5126
5127 if (cm->refresh_golden_frame == 1)
5128 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
5129 else
5130 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
5131
5132 if (cm->refresh_alt_ref_frame == 1)
5133 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
5134 else
5135 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
5136
5137
5138 if (cm->refresh_last_frame & cm->refresh_golden_frame)
5139 /* both refreshed */
5140 cpi->gold_is_last = 1;
5141 else if (cm->refresh_last_frame ^ cm->refresh_golden_frame)
5142 /* 1 refreshed but not the other */
5143 cpi->gold_is_last = 0;
5144
5145 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame)
5146 /* both refreshed */
5147 cpi->alt_is_last = 1;
5148 else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame)
5149 /* 1 refreshed but not the other */
5150 cpi->alt_is_last = 0;
5151
5152 if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame)
5153 /* both refreshed */
5154 cpi->gold_is_alt = 1;
5155 else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame)
5156 /* 1 refreshed but not the other */
5157 cpi->gold_is_alt = 0;
5158
5159 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
5160
5161 if (cpi->gold_is_last)
5162 cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
5163
5164 if (cpi->alt_is_last)
5165 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
5166
5167 if (cpi->gold_is_alt)
5168 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
5169
5170
5171 if (!cpi->oxcf.error_resilient_mode)
5172 {
5173 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
5174 /* Update the alternate reference frame stats as appropriate. */
5175 update_alt_ref_frame_stats(cpi);
5176 else
5177 /* Update the Golden frame stats as appropriate. */
5178 update_golden_frame_stats(cpi);
5179 }
5180
5181 if (cm->frame_type == KEY_FRAME)
5182 {
5183 /* Tell the caller that the frame was coded as a key frame */
5184 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
5185
5186 /* As this frame is a key frame the next defaults to an inter frame. */
5187 cm->frame_type = INTER_FRAME;
5188
5189 cpi->last_frame_percent_intra = 100;
5190 }
5191 else
5192 {
5193 *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
5194
5195 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
5196 }
5197
5198 /* Clear the one shot update flags for segmentation map and mode/ref
5199 * loop filter deltas.
5200 */
5201 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
5202 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
5203 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
5204
5205
5206 /* Dont increment frame counters if this was an altref buffer update
5207 * not a real frame
5208 */
5209 if (cm->show_frame)
5210 {
5211 cm->current_video_frame++;
5212 cpi->frames_since_key++;
5213 cpi->temporal_pattern_counter++;
5214 }
5215
5216 /* reset to normal state now that we are done. */
5217
5218
5219
5220 #if 0
5221 {
5222 char filename[512];
5223 FILE *recon_file;
5224 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
5225 recon_file = fopen(filename, "wb");
5226 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
5227 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
5228 fclose(recon_file);
5229 }
5230 #endif
5231
5232 /* DEBUG */
5233 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
5234
5235
5236 }
5237 #if !(CONFIG_REALTIME_ONLY)
/* Second-pass encode step: update two-pass rate-control statistics (except
 * when coding an alt-ref frame), encode the frame, and account for the bits
 * consumed against the remaining two-pass bit budget.
 */
static void Pass2Encode(VP8_COMP *cpi, unsigned long *size,
                        unsigned char *dest, unsigned char *dest_end,
                        unsigned int *frame_flags)
{
    /* Alt-ref coding passes do not consume a stats packet. */
    if (!cpi->common.refresh_alt_ref_frame)
        vp8_second_pass(cpi);

    encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);

    /* Charge the bits just produced against the two-pass budget. */
    cpi->twopass.bits_left -= 8 * *size;

    /* NOTE: refresh_alt_ref_frame is deliberately re-read here rather than
     * cached from the check above — encode_frame_to_data_rate() clears the
     * flag once a frame has actually been emitted.
     */
    if (!cpi->common.refresh_alt_ref_frame)
    {
        /* Credit back the guaranteed minimum per-frame rate (VBR floor). */
        double two_pass_min_rate =
            (double)(cpi->oxcf.target_bandwidth *
                     cpi->oxcf.two_pass_vbrmin_section / 100);

        cpi->twopass.bits_left += (int64_t)(two_pass_min_rate /
                                            cpi->framerate);
    }
}
5254 #endif
5255
/* Hand a raw source frame to the encoder's lookahead queue.
 *
 * Returns 0 on success, -1 if the lookahead queue refused the frame.
 * Also accumulates the time spent receiving data for profiling.
 */
int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
                          int64_t end_time)
{
    struct vpx_usec_timer timer;
    unsigned char *active_map;
    int res = 0;

    vpx_usec_timer_start(&timer);

    /* Re-create the raw frame buffers if the input dimensions changed.
     * Only legal when lagged encoding is effectively disabled.
     */
    if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height)
    {
        assert(cpi->oxcf.lag_in_frames < 2);
        dealloc_raw_frame_buffers(cpi);
        alloc_raw_frame_buffers(cpi);
    }

    active_map = cpi->active_map_enabled ? cpi->active_map : NULL;

    if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
                           frame_flags, active_map))
    {
        res = -1;
    }

    vpx_usec_timer_mark(&timer);
    cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);

    return res;
}
5279
5280
frame_is_reference(const VP8_COMP * cpi)5281 static int frame_is_reference(const VP8_COMP *cpi)
5282 {
5283 const VP8_COMMON *cm = &cpi->common;
5284 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
5285
5286 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
5287 || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
5288 || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
5289 || cm->refresh_entropy_probs
5290 || xd->mode_ref_lf_delta_update
5291 || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
5292 }
5293
5294
/* Top-level per-frame encode entry point.
 *
 * Pulls the next source frame from the lookahead queue (or synthesizes a
 * temporally-filtered alt-ref source), adapts the frame rate estimate from
 * the incoming timestamps, dispatches to the first-pass / second-pass /
 * one-pass encoder, and performs post-encode bookkeeping (entropy context
 * saves, layer state, timing and optional internal statistics).
 *
 * Returns 0 on success, -1 when no frame is available (or cpi is NULL),
 * and VPX_CODEC_CORRUPT_FRAME if an internal error was trapped via
 * setjmp/longjmp.
 */
int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush)
{
    VP8_COMMON *cm;
    struct vpx_usec_timer tsctimer;
    struct vpx_usec_timer ticktimer;
    struct vpx_usec_timer cmptimer;
    YV12_BUFFER_CONFIG *force_src_buffer = NULL;

    if (!cpi)
        return -1;

    cm = &cpi->common;

    /* Internal encoder errors longjmp() back to here; report the frame as
     * corrupt rather than aborting the process.
     */
    if (setjmp(cpi->common.error.jmp))
    {
        cpi->common.error.setjmp = 0;
        vp8_clear_system_state();
        return VPX_CODEC_CORRUPT_FRAME;
    }

    cpi->common.error.setjmp = 1;

    vpx_usec_timer_start(&cmptimer);

    cpi->source = NULL;

#if !(CONFIG_REALTIME_ONLY)
    /* Should we code an alternate reference frame */
    if (cpi->oxcf.error_resilient_mode == 0 &&
        cpi->oxcf.play_alternate &&
        cpi->source_alt_ref_pending)
    {
        if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
                                              cpi->frames_till_gf_update_due,
                                              PEEK_FORWARD)))
        {
            cpi->alt_ref_source = cpi->source;
            if (cpi->oxcf.arnr_max_frames > 0)
            {
                /* Build the temporally filtered alt-ref source and encode
                 * from that buffer instead of the raw frame.
                 */
                vp8_temporal_filter_prepare_c(cpi,
                                              cpi->frames_till_gf_update_due);
                force_src_buffer = &cpi->alt_ref_buffer;
            }
            cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
            cm->refresh_alt_ref_frame = 1;
            cm->refresh_golden_frame = 0;
            cm->refresh_last_frame = 0;
            /* Alt-ref frames are never displayed. */
            cm->show_frame = 0;
            /* Clear Pending alt Ref flag. */
            cpi->source_alt_ref_pending = 0;
            cpi->is_src_frame_alt_ref = 0;
        }
    }
#endif

    if (!cpi->source)
    {
        /* Read last frame source if we are encoding first pass. */
        if (cpi->pass == 1 && cm->current_video_frame > 0)
        {
            if((cpi->last_source = vp8_lookahead_peek(cpi->lookahead, 1,
                                                      PEEK_BACKWARD)) == NULL)
                return -1;
        }


        if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush)))
        {
            cm->show_frame = 1;

            /* Is this displayed frame the same one that was just coded
             * as the alt-ref?
             */
            cpi->is_src_frame_alt_ref = cpi->alt_ref_source
                                        && (cpi->source == cpi->alt_ref_source);

            if(cpi->is_src_frame_alt_ref)
                cpi->alt_ref_source = NULL;
        }
    }

    if (cpi->source)
    {
        cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
        cpi->un_scaled_source = cpi->Source;
        *time_stamp = cpi->source->ts_start;
        *time_end = cpi->source->ts_end;
        *frame_flags = cpi->source->flags;

        if (cpi->pass == 1 && cm->current_video_frame > 0)
        {
            cpi->last_frame_unscaled_source = &cpi->last_source->img;
        }
    }
    else
    {
        /* No frame available: flush the first-pass stats if requested. */
        *size = 0;
#if !(CONFIG_REALTIME_ONLY)

        if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
        {
            vp8_end_first_pass(cpi); /* get last stats packet */
            cpi->twopass.first_pass_done = 1;
        }

#endif

        return -1;
    }

    if (cpi->source->ts_start < cpi->first_time_stamp_ever)
    {
        cpi->first_time_stamp_ever = cpi->source->ts_start;
        cpi->last_end_time_stamp_seen = cpi->source->ts_start;
    }

    /* adjust frame rates based on timestamps given */
    if (cm->show_frame)
    {
        int64_t this_duration;
        int step = 0;

        if (cpi->source->ts_start == cpi->first_time_stamp_ever)
        {
            this_duration = cpi->source->ts_end - cpi->source->ts_start;
            step = 1;
        }
        else
        {
            int64_t last_duration;

            this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
            last_duration = cpi->last_end_time_stamp_seen
                            - cpi->last_time_stamp_seen;
            /* do a step update if the duration changes by 10% */
            if (last_duration)
                step = (int)(((this_duration - last_duration) *
                              10 / last_duration));
        }

        if (this_duration)
        {
            if (step)
                cpi->ref_framerate = 10000000.0 / this_duration;
            else
            {
                double avg_duration, interval;

                /* Average this frame's rate into the last second's average
                 * frame rate. If we haven't seen 1 second yet, then average
                 * over the whole interval seen.
                 */
                interval = (double)(cpi->source->ts_end -
                                    cpi->first_time_stamp_ever);
                if(interval > 10000000.0)
                    interval = 10000000;

                avg_duration = 10000000.0 / cpi->ref_framerate;
                avg_duration *= (interval - avg_duration + this_duration);
                avg_duration /= interval;

                cpi->ref_framerate = 10000000.0 / avg_duration;
            }
#if CONFIG_MULTI_RES_ENCODING
            if (cpi->oxcf.mr_total_resolutions > 1) {
                LOWER_RES_FRAME_INFO* low_res_frame_info = (LOWER_RES_FRAME_INFO*)
                    cpi->oxcf.mr_low_res_mode_info;
                // Frame rate should be the same for all spatial layers in
                // multi-res-encoding (simulcast), so we constrain the frame for
                // higher layers to be that of lowest resolution. This is needed
                // as the application may decide to skip encoding a high layer and
                // then start again, in which case a big jump in time-stamps will
                // be received for that high layer, which will yield an incorrect
                // frame rate (from time-stamp adjustment in above calculation).
                if (cpi->oxcf.mr_encoder_id) {
                    cpi->ref_framerate = low_res_frame_info->low_res_framerate;
                }
                else {
                    // Keep track of frame rate for lowest resolution.
                    low_res_frame_info->low_res_framerate = cpi->ref_framerate;
                }
            }
#endif
            if (cpi->oxcf.number_of_layers > 1)
            {
                unsigned int i;

                /* Update frame rates for each layer */
                assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
                for (i = 0; i < cpi->oxcf.number_of_layers &&
                            i < VPX_TS_MAX_LAYERS; ++i)
                {
                    LAYER_CONTEXT *lc = &cpi->layer_context[i];
                    lc->framerate = cpi->ref_framerate /
                                    cpi->oxcf.rate_decimator[i];
                }
            }
            else
                vp8_new_framerate(cpi, cpi->ref_framerate);
        }

        cpi->last_time_stamp_seen = cpi->source->ts_start;
        cpi->last_end_time_stamp_seen = cpi->source->ts_end;
    }

    if (cpi->oxcf.number_of_layers > 1)
    {
        int layer;

        update_layer_contexts (cpi);

        /* Restore layer specific context & set frame rate */
        if (cpi->temporal_layer_id >= 0) {
            layer = cpi->temporal_layer_id;
        } else {
            layer = cpi->oxcf.layer_id[
                    cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
        }
        restore_layer_context (cpi, layer);
        vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
    }

    if (cpi->compressor_speed == 2)
    {
        vpx_usec_timer_start(&tsctimer);
        vpx_usec_timer_start(&ticktimer);
    }

    cpi->lf_zeromv_pct = (cpi->zeromv_count * 100)/cm->MBs;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    {
        /* Carve dest into one control partition (1/10th of the space) plus
         * num_part token partitions of 9/10 * dest_size / num_part each.
         */
        int i;
        const int num_part = (1 << cm->multi_token_partition);
        /* the available bytes in dest */
        const unsigned long dest_size = dest_end - dest;
        const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);

        unsigned char *dp = dest;

        cpi->partition_d[0] = dp;
        dp += dest_size/10; /* reserve 1/10 for control partition */
        cpi->partition_d_end[0] = dp;

        for(i = 0; i < num_part; i++)
        {
            cpi->partition_d[i + 1] = dp;
            dp += tok_part_buff_size;
            cpi->partition_d_end[i + 1] = dp;
        }
    }
#endif

    /* start with a 0 size frame */
    *size = 0;

    /* Clear down mmx registers */
    vp8_clear_system_state();

    cm->frame_type = INTER_FRAME;
    cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
    /* find a free buffer for the new frame */
    {
        int i = 0;
        for(; i < NUM_YV12_BUFFERS; i++)
        {
            if(!cm->yv12_fb[i].flags)
            {
                cm->new_fb_idx = i;
                break;
            }
        }

        /* at least one buffer must always be free here */
        assert(i < NUM_YV12_BUFFERS );
    }
#if !(CONFIG_REALTIME_ONLY)

    /* Dispatch to the encode pass selected at init time. */
    if (cpi->pass == 1)
    {
        Pass1Encode(cpi, size, dest, frame_flags);
    }
    else if (cpi->pass == 2)
    {
        Pass2Encode(cpi, size, dest, dest_end, frame_flags);
    }
    else
#endif
        encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);

    if (cpi->compressor_speed == 2)
    {
        /* Maintain running averages of encode / mode-pick time used by the
         * real-time speed adaptation logic.
         */
        unsigned int duration, duration2;
        vpx_usec_timer_mark(&tsctimer);
        vpx_usec_timer_mark(&ticktimer);

        duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
        duration2 = (unsigned int)((double)duration / 2);

        if (cm->frame_type != KEY_FRAME)
        {
            if (cpi->avg_encode_time == 0)
                cpi->avg_encode_time = duration;
            else
                cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
        }

        if (duration2)
        {
            {

                if (cpi->avg_pick_mode_time == 0)
                    cpi->avg_pick_mode_time = duration2;
                else
                    cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3;
            }
        }

    }

    /* If this frame did not persist its entropy updates, restore the
     * saved ("left") coefficient contexts.
     */
    if (cm->refresh_entropy_probs == 0)
    {
        memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
    }

    /* Save the contexts separately for alt ref, gold and last. */
    /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
    if(cm->refresh_alt_ref_frame)
        memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

    if(cm->refresh_golden_frame)
        memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

    if(cm->refresh_last_frame)
        memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

    /* If it was a dropped frame (size == 0), honor the refresh requests on
     * subsequent frames; otherwise return the flags to their normal state.
     */
    if (*size > 0)
    {
        cpi->droppable = !frame_is_reference(cpi);

        /* return to normal state */
        cm->refresh_entropy_probs = 1;
        cm->refresh_alt_ref_frame = 0;
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
        cm->frame_type = INTER_FRAME;

    }

    /* Save layer specific state */
    if (cpi->oxcf.number_of_layers > 1)
        save_layer_context (cpi);

    vpx_usec_timer_mark(&cmptimer);
    cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

    if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
    {
        generate_psnr_packet(cpi);
    }

#if CONFIG_INTERNAL_STATS

    if (cpi->pass != 1)
    {
        cpi->bytes += *size;

        if (cm->show_frame)
        {
            cpi->common.show_frame_mi = cpi->common.mi;
            cpi->count ++;

            if (cpi->b_calculate_psnr)
            {
                /* Accumulate per-plane SSE / PSNR for the reconstruction. */
                uint64_t ye,ue,ve;
                double frame_psnr;
                YV12_BUFFER_CONFIG *orig = cpi->Source;
                YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
                unsigned int y_width = cpi->common.Width;
                unsigned int y_height = cpi->common.Height;
                unsigned int uv_width = (y_width + 1) / 2;
                unsigned int uv_height = (y_height + 1) / 2;
                int y_samples = y_height * y_width;
                int uv_samples = uv_height * uv_width;
                int t_samples = y_samples + 2 * uv_samples;
                double sq_error;

                ye = calc_plane_error(orig->y_buffer, orig->y_stride,
                  recon->y_buffer, recon->y_stride, y_width, y_height);

                ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
                  recon->u_buffer, recon->uv_stride, uv_width, uv_height);

                ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
                  recon->v_buffer, recon->uv_stride, uv_width, uv_height);

                sq_error = (double)(ye + ue + ve);

                frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);

                cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
                cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
                cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
                cpi->total_sq_error += sq_error;
                cpi->total += frame_psnr;
#if CONFIG_POSTPROC
                {
                    /* Repeat the measurement on the deblocked
                     * (post-processed) reconstruction, plus SSIM.
                     */
                    YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
                    double sq_error2;
                    double frame_psnr2, frame_ssim2 = 0;
                    double weight = 0;

                    vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0);
                    vp8_clear_system_state();

                    ye = calc_plane_error(orig->y_buffer, orig->y_stride,
                      pp->y_buffer, pp->y_stride, y_width, y_height);

                    ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
                      pp->u_buffer, pp->uv_stride, uv_width, uv_height);

                    ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
                      pp->v_buffer, pp->uv_stride, uv_width, uv_height);

                    sq_error2 = (double)(ye + ue + ve);

                    frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

                    cpi->totalp_y += vpx_sse_to_psnr(y_samples,
                                                     255.0, (double)ye);
                    cpi->totalp_u += vpx_sse_to_psnr(uv_samples,
                                                     255.0, (double)ue);
                    cpi->totalp_v += vpx_sse_to_psnr(uv_samples,
                                                     255.0, (double)ve);
                    cpi->total_sq_error2 += sq_error2;
                    cpi->totalp += frame_psnr2;

                    frame_ssim2 = vp8_calc_ssim(cpi->Source,
                      &cm->post_proc_buffer, 1, &weight);

                    cpi->summed_quality += frame_ssim2 * weight;
                    cpi->summed_weights += weight;

                    if (cpi->oxcf.number_of_layers > 1)
                    {
                        unsigned int i;

                        for (i=cpi->current_layer;
                             i<cpi->oxcf.number_of_layers; i++)
                        {
                            cpi->frames_in_layer[i]++;

                            cpi->bytes_in_layer[i] += *size;
                            cpi->sum_psnr[i] += frame_psnr;
                            cpi->sum_psnr_p[i] += frame_psnr2;
                            cpi->total_error2[i] += sq_error;
                            cpi->total_error2_p[i] += sq_error2;
                            cpi->sum_ssim[i] += frame_ssim2 * weight;
                            cpi->sum_weights[i] += weight;
                        }
                    }
                }
#endif
            }

            if (cpi->b_calculate_ssimg)
            {
                double y, u, v, frame_all;
                frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
                                           &y, &u, &v);

                if (cpi->oxcf.number_of_layers > 1)
                {
                    unsigned int i;

                    for (i=cpi->current_layer;
                         i<cpi->oxcf.number_of_layers; i++)
                    {
                        if (!cpi->b_calculate_psnr)
                            cpi->frames_in_layer[i]++;

                        cpi->total_ssimg_y_in_layer[i] += y;
                        cpi->total_ssimg_u_in_layer[i] += u;
                        cpi->total_ssimg_v_in_layer[i] += v;
                        cpi->total_ssimg_all_in_layer[i] += frame_all;
                    }
                }
                else
                {
                    cpi->total_ssimg_y += y;
                    cpi->total_ssimg_u += u;
                    cpi->total_ssimg_v += v;
                    cpi->total_ssimg_all += frame_all;
                }
            }

        }
    }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);

        fclose(f);
    }

#endif
#endif

    cpi->common.error.setjmp = 0;

    return 0;
}
5835
/* Return the encoder's reconstruction of the most recent displayable frame,
 * optionally post-processed. Returns 0 on success, -1 if no frame is
 * available (or the last coded frame was a non-displayable alt-ref).
 */
int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp8_ppflags_t *flags)
{
    int ret;

    /* The alt-ref buffer is not a displayable frame. */
    if (cpi->common.refresh_alt_ref_frame)
        return -1;

#if CONFIG_MULTITHREAD
    /* Wait for the loop-filter thread before touching frame_to_show. */
    if (cpi->b_lpf_running)
    {
        sem_wait(&cpi->h_event_end_lpf);
        cpi->b_lpf_running = 0;
    }
#endif

#if CONFIG_POSTPROC
    cpi->common.show_frame_mi = cpi->common.mi;
    ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
    (void)flags;

    if (cpi->common.frame_to_show)
    {
        /* Shallow-copy the frame descriptor and trim the visible size. */
        *dest = *cpi->common.frame_to_show;
        dest->y_width = cpi->common.Width;
        dest->y_height = cpi->common.Height;
        dest->uv_height = cpi->common.Height / 2;
        ret = 0;
    }
    else
    {
        ret = -1;
    }
#endif

    vp8_clear_system_state();
    return ret;
}
5876
/* Configure a region-of-interest map: a per-macroblock segment map plus
 * per-segment quantizer deltas, loop-filter deltas and encode-breakout
 * thresholds. Passing a NULL map disables segmentation.
 *
 * Returns 0 on success, -1 on validation failure (incompatible with cyclic
 * refresh, map dimension mismatch, or out-of-range deltas).
 */
int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                   unsigned int cols, int delta_q[4], int delta_lf[4],
                   unsigned int threshold[4])
{
    const int range = 63;
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
    int internal_delta_q[MAX_MB_SEGMENTS];
    int i;

    /* The ROI map cannot be combined with cyclic refresh mode. */
    if (cpi->cyclic_refresh_mode_enabled)
        return -1;

    /* The map must match the encoder's macroblock grid. */
    if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
        return -1;

    /* Range check all segment quantizer and loop-filter deltas. */
    for (i = 0; i < MAX_MB_SEGMENTS; i++)
    {
        if (abs(delta_q[i]) > range || abs(delta_lf[i]) > range)
            return -1;
    }

    /* A NULL map disables segmentation entirely. */
    if (!map)
    {
        disable_segmentation(cpi);
        return 0;
    }

    /* Translate external (user-level) Q deltas to internal quantizer
     * index deltas, preserving sign.
     */
    for (i = 0; i < MAX_MB_SEGMENTS; i++)
    {
        internal_delta_q[i] = (delta_q[i] >= 0) ? q_trans[delta_q[i]]
                                                : -q_trans[-delta_q[i]];
    }

    /* Install and activate the segmentation map. */
    set_segmentation_map(cpi, map);
    enable_segmentation(cpi);

    /* Populate per-segment feature data and breakout thresholds. */
    for (i = 0; i < MAX_MB_SEGMENTS; i++)
    {
        feature_data[MB_LVL_ALT_Q][i] = internal_delta_q[i];
        feature_data[MB_LVL_ALT_LF][i] = delta_lf[i];
        cpi->segment_encode_breakout[i] = threshold[i];
    }

    /* Commit the feature data as deltas against the frame baseline. */
    set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

    return 0;
}
5942
/* Install (or clear, with map == NULL) the per-macroblock active map that
 * restricts encoding to a region of the frame.
 *
 * Returns 0 on success, -1 if the map dimensions do not match the
 * encoder's macroblock grid.
 */
int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map,
                       unsigned int rows, unsigned int cols)
{
    /* Reject maps that do not match the macroblock grid. */
    if (rows != cpi->common.mb_rows || cols != cpi->common.mb_cols)
        return -1;

    if (map)
    {
        memcpy(cpi->active_map, map, rows * cols);
        cpi->active_map_enabled = 1;
    }
    else
    {
        /* A NULL map turns active-map filtering off. */
        cpi->active_map_enabled = 0;
    }

    return 0;
}
5962
/* Set the internal down-scaling modes applied before encoding.
 *
 * Returns 0 on success, -1 for an unsupported mode. Note (preserved from
 * the original contract): horiz_scale is committed before vert_mode is
 * validated, so a valid horizontal mode takes effect even when the
 * vertical mode is rejected.
 */
int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
                          VPX_SCALING vert_mode)
{
    if (horiz_mode > ONETWO)
        return -1;

    cpi->common.horiz_scale = horiz_mode;

    if (vert_mode > ONETWO)
        return -1;

    cpi->common.vert_scale = vert_mode;

    return 0;
}
5977
5978
5979
/* Sum of squared error over the luma plane, accumulated as 16x16-block
 * MSE contributions between the source and reconstruction.
 */
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
{
    int row, col;
    int total_sse = 0;
    unsigned char *src_row = source->y_buffer;
    unsigned char *dst_row = dest->y_buffer;

    /* Walk the Y plane one 16-pixel-high band at a time. */
    for (row = 0; row < source->y_height; row += 16)
    {
        for (col = 0; col < source->y_width; col += 16)
        {
            unsigned int sse;

            total_sse += vpx_mse16x16(src_row + col, source->y_stride,
                                      dst_row + col, dest->y_stride, &sse);
        }

        src_row += 16 * source->y_stride;
        dst_row += 16 * dest->y_stride;
    }

    return total_sse;
}
6006
6007
/* Return the base quantizer index currently in use by the encoder. */
int vp8_get_quantizer(VP8_COMP *cpi)
{
    return cpi->common.base_qindex;
}
6012