1 /*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * multimedia converter based on the FFmpeg libraries
24 */
25
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87
88
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101
102 #include <time.h>
103
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106
107 #include "libavutil/avassert.h"
108
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* File receiving per-frame video statistics when -vstats is used. */
static FILE *vstats_file;

/* Variable names available in the -force_key_frames expression. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

/* Wall-clock / user CPU / system CPU timestamps, all in microseconds. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
} BenchmarkTimeStamps;

static void do_video_stats(OutputStream *ost, int frame_size);
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);

static int run_as_daemon = 0;
static int nb_frames_dup = 0;
/* Threshold for warning about excessive frame duplication; doubles each time. */
static unsigned dup_warning = 1000;
static int nb_frames_drop = 0;
/* NOTE(review): presumably counts of successful vs. failed decodes — confirm
   against the code that updates it (not visible in this chunk). */
static int64_t decode_error_stat[2];

static int want_sdp = 1;

/* Reference timestamps for update_benchmark() deltas. */
static BenchmarkTimeStamps current_time;
AVIOContext *progress_avio = NULL;

/* Lazily allocated scratch buffer for encoded subtitles (do_subtitle_out). */
static uint8_t *subtitle_out;

/* Global tables of all input/output files and streams, shared with
   ffmpeg_opt.c / ffmpeg_filter.c. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;
int nb_filtergraphs;
159
#if HAVE_TERMIOS_H

/* Saved terminal state so that term_init()'s raw mode can be undone;
   restore_tty is set once oldtty holds a valid snapshot. */
static struct termios oldtty;
static int restore_tty;
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif

/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
175
/* Reset the sub2video canvas of "ist" to a freshly allocated, fully
 * transparent RGB32 frame. Returns 0 on success or a negative AVERROR
 * code if buffer allocation fails. */
static int sub2video_get_blank_frame(InputStream *ist)
{
    AVFrame *frame = ist->sub2video.frame;
    int err;

    av_frame_unref(frame);
    /* Prefer the decoder's dimensions; fall back to the configured sub2video size. */
    frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    frame->format = AV_PIX_FMT_RGB32;
    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
190
/* Blit one paletted bitmap subtitle rectangle "r" into the RGB32 canvas
 * "dst" (stride dst_linesize, dimensions w x h), expanding palette indices
 * to 32-bit pixels. Non-bitmap rectangles and rectangles that do not fit
 * inside the canvas are skipped with a warning. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    const uint32_t *palette;
    const uint8_t *src_row;
    int row, col;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
        );
        return;
    }

    /* Advance to the rectangle's top-left corner in the canvas. */
    dst    += r->y * dst_linesize + r->x * 4;
    src_row = r->data[0];
    palette = (uint32_t *)r->data[1];

    for (row = 0; row < r->h; row++) {
        uint32_t *out      = (uint32_t *)dst;
        const uint8_t *in  = src_row;
        for (col = 0; col < r->w; col++)
            *out++ = palette[*in++];
        dst     += dst_linesize;
        src_row += r->linesize[0];
    }
}
221
/* Push the current sub2video frame, stamped with "pts", into every
 * filtergraph input fed by this stream. KEEP_REF is used so the same frame
 * can be re-sent on later heartbeats. EOF from an already-closed buffer
 * source is expected and not reported. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (ret != AVERROR_EOF && ret < 0)
            /* Fixed grammar of the user-facing warning ("while add" -> "while
               adding"). */
            av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239
/* Render subtitle "sub" onto the sub2video canvas of "ist" and push the
 * result into the filtergraphs. With sub == NULL, re-send a blank (or
 * repeat) frame: at initialization time heartbeat_pts becomes the start
 * time, otherwise the previous subpicture's end time is used. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    uint8_t *dst;   /* fixed: was int8_t*, mismatching sub2video_copy_rect() */
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* Convert the subtitle's display window (AV_TIME_BASE + ms offsets)
           to the stream time base. */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ist->sub2video.initialize ?
                    heartbeat_pts : ist->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts    = end_pts;
    ist->sub2video.initialize = 0;
}
279
/* Called whenever a frame is read from the file containing "ist": keep the
 * sibling sub2video streams of that file fed so filters (e.g. overlay) that
 * wait on a subtitle input do not stall the graph while video frames pile up. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int idx, j, nb_reqs;
    int64_t target_pts;

    for (idx = 0; idx < infile->nb_streams; idx++) {
        InputStream *ist2 = input_streams[infile->ist_index + idx];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        target_pts = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (target_pts <= ist2->sub2video.last_pts)
            continue;
        /* Refresh the overlaid subpicture when the current one has expired
           or the system still needs initializing. */
        if (target_pts >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            sub2video_update(ist2, target_pts + 1, NULL);
        nb_reqs = 0;
        for (j = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        /* Only re-push if some filter actually asked for input and failed. */
        if (nb_reqs)
            sub2video_push_ref(ist2, target_pts);
    }
}
311
/* Flush a sub2video stream at end of input: display the final subpicture
 * (if one is still pending) and then send EOF to every buffer source. */
static void sub2video_flush(InputStream *ist)
{
    int idx, err;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (idx = 0; idx < ist->nb_filters; idx++) {
        err = av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
        if (err < 0 && err != AVERROR_EOF)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
325
326 /* end of sub2video hack */
327
/* Restore the saved terminal attributes. Async-signal-safe: also called
 * from sigterm_handler(), so it must not allocate, lock or log. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335
/* Public terminal teardown: emit an empty QUIET log line (flushes any
 * pending progress-line state), then restore the tty. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
341
/* Signal state written asynchronously from sigterm_handler(). */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
/* Set once transcode initialization completes; read by decode_interrupt_cb(). */
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
/* Set at the end of ffmpeg_cleanup(); polled by the Windows CtrlHandler. */
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
347
348 static void
sigterm_handler(int sig)349 sigterm_handler(int sig)
350 {
351 int ret;
352 received_sigterm = sig;
353 received_nb_signals++;
354 term_exit_sigsafe();
355 if(received_nb_signals > 3) {
356 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357 strlen("Received > 3 system signals, hard exiting\n"));
358 if (ret < 0) { /* Do nothing */ };
359 exit(123);
360 }
361 }
362
363 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * SIGINT/SIGTERM handling so both platforms share one shutdown path. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
393 #endif
394
/* Install signal handlers and, when stdin interaction is enabled, switch the
 * terminal to raw (non-canonical, no-echo) mode so read_key() can fetch
 * single keypresses. The previous tty state is saved for term_exit(). */
void term_init(void)
{
#if HAVE_TERMIOS_H
    if (!run_as_daemon && stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* Snapshot current settings so they can be restored on exit. */
            oldtty = tty;
            restore_tty = 1;

            /* Disable input translation, break handling and XON/XOFF flow
               control on input. */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            /* Raw input: no echo, no line buffering, no extended processing. */
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* read() returns as soon as a single byte is available. */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
431
432 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, or -1 when no input is available (also on a
 * closed input pipe under Windows). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* Zero timeout: select() acts as a pure poll and returns immediately. */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        /* 0 on EOF, negative on error. */
        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode() fails when stdin is not a console => pipe. */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
483
/* AVIO interrupt callback: tells libavformat to abort blocking operations
 * once a termination signal arrives. During initialization
 * (transcode_init_done == 0) a single signal aborts immediately. */
static int decode_interrupt_cb(void *ctx)
{
    return received_nb_signals > atomic_load(&transcode_init_done);
}

/* Installed on every AVFormatContext so network/file I/O stays interruptible. */
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
490
/* Global teardown, registered as the exit_program() hook: frees all
 * filtergraphs, output files/streams, input files/streams, the vstats file
 * and option state, then reports how the run ended. "ret" is the process
 * exit code being propagated. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* Drain and free any frames still queued for this filter input. */
            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                /* Likewise for subtitles buffered by the sub2video hack. */
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_freep(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        /* Only close the AVIO context for muxers that actually own a file. */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* Unreference packets still waiting for the muxer header to be
           written (see write_packet()'s pre-header queueing). */
        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        /* fclose() flushes the buffered statistics; report if that fails. */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    /* Lets the Windows CtrlHandler know the cleanup finished. */
    ffmpeg_exited = 1;
}
648
/* Remove from *a every option whose key also appears in b (exact,
 * case-sensitive key match). Used to strip already-consumed options. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
    AVDictionaryEntry *entry = NULL;

    while ((entry = av_dict_get(b, "", entry, AV_DICT_IGNORE_SUFFIX)))
        av_dict_set(a, entry->key, NULL, AV_DICT_MATCH_CASE);
}
657
/* Abort the program if dictionary m still contains an option that was not
 * consumed by the library (i.e. the user passed an unknown option). */
void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *unused = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX);

    if (unused) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", unused->key);
        exit_program(1);
    }
}
666
/* Terminate after an experimental codec was requested without
 * "-strict experimental" (the diagnostic is printed by the caller). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
671
update_benchmark(const char * fmt,...)672 static void update_benchmark(const char *fmt, ...)
673 {
674 if (do_benchmark_all) {
675 BenchmarkTimeStamps t = get_benchmark_time_stamps();
676 va_list va;
677 char buf[1024];
678
679 if (fmt) {
680 va_start(va, fmt);
681 vsnprintf(buf, sizeof(buf), fmt, va);
682 va_end(va);
683 av_log(NULL, AV_LOG_INFO,
684 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
685 t.user_usec - current_time.user_usec,
686 t.sys_usec - current_time.sys_usec,
687 t.real_usec - current_time.real_usec, buf);
688 }
689 current_time = t;
690 }
691 }
692
/* Mark every output stream as finished: "ost" receives this_stream, every
 * other stream receives "others", OR-ed into its finished flags. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
{
    int n;

    for (n = 0; n < nb_output_streams; n++) {
        OutputStream *cur = output_streams[n];
        cur->finished |= (cur == ost) ? this_stream : others;
    }
}
701
/* Write one packet to the muxer for "ost", after frame-count limiting,
 * pre-header queueing, timestamp rescaling and DTS monotonicity fixing.
 * "unqueue" is set when the packet comes back out of the muxing queue, so
 * it is not counted against -frames a second time. Consumes "pkt". */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue (doubling, capped) or give up if it is full. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        /* The queued copy must own its data, so make it refcounted first. */
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* -vsync drop / negative -async: strip timestamps entirely. */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* Pull quality/picture-type/error stats the encoder attached as
           side data, for the status line and -vstats output. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        /* CFR output: the duration is dictated by the frame rate. */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            /* dts > pts is invalid; replace both with the median of
               {pts, dts, last_mux_dts + 1}. */
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Enforce monotonically increasing DTS (strictly, unless the
               muxer allows equal timestamps via AVFMT_TS_NONSTRICT). */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
                av_get_media_type_string(ost->enc_ctx->codec_type),
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    }
    av_packet_unref(pkt);
}
838
/* Mark the encoder for "ost" as finished. With -shortest, also clamp the
 * owning file's recording time to this stream's current end position so the
 * remaining streams stop at the same point. */
static void close_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts,
                                   ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        if (end < of->recording_time)
            of->recording_time = end;
    }
}
849
850 /*
851 * Send a single packet to the output, applying any bitstream filters
852 * associated with the output stream. This may result in any number
853 * of packets actually being written, depending on what bitstream
854 * filters are applied. The supplied packet is consumed and will be
855 * blank (as if newly-allocated) when this function returns.
856 *
857 * If eof is set, instead indicate EOF to all bitstream filters and
858 * therefore flush any delayed packets to the output. A blank packet
859 * must be supplied in this case.
860 */
/* Run "pkt" through the stream's bitstream-filter chain (if any) and write
 * every resulting packet; with eof set, flush the chain instead. The
 * supplied packet is consumed and left blank on return. */
static void output_packet(OutputFile *of, AVPacket *pkt,
                          OutputStream *ost, int eof)
{
    int ret = 0;

    if (!ost->bsf_ctx) {
        /* No filters: pass the packet straight through (nothing to flush). */
        if (!eof)
            write_packet(of, pkt, ost, 0);
        goto finish;
    }

    /* Feed the packet (or NULL for EOF) in, then drain all output packets. */
    ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
    if (ret < 0)
        goto finish;
    while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
        write_packet(of, pkt, ost, 0);
    if (ret == AVERROR(EAGAIN))
        ret = 0;

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if (exit_on_error)
            exit_program(1);
    }
}
886
/* Return 1 while "ost" may still accept frames; once the file's configured
 * recording time (-t) has been reached, close the stream and return 0. */
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time == INT64_MAX)
        return 1;
    if (av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base,
                      of->recording_time, AV_TIME_BASE_Q) < 0)
        return 1;
    close_output_stream(ost);
    return 0;
}
899
/* Encode one audio frame for "ost" and send all resulting packets to the
 * muxer. Maintains ost->sync_opts as a running sample counter, which also
 * supplies the pts when the frame carries none (or -async < 0). Terminates
 * the program on encoder errors. */
static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* Fall back to the running sample counter when the frame has no pts or
       audio sync is disabled. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain every packet the encoder has ready; EAGAIN means it needs
       another frame before producing more output. */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
959
/* Encode subtitle "sub" for "ost" and send the resulting packet(s) to the
 * muxer. DVB subtitles are encoded twice: one packet draws the subpicture
 * and a second (with num_rects forced to 0) clears it at end_display_time.
 * Terminates the program on encoding errors. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* Preserve num_rects: the clear pass (i == 1) temporarily zeroes it. */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1042
/**
 * Perform video frame-rate conversion and encoding for one filtered frame.
 *
 * Depending on the active vsync mode, the frame may be encoded once,
 * duplicated one or more times to fill a CFR timestamp gap, or dropped
 * entirely.  The previously encoded frame is kept in ost->last_frame so
 * that duplication can re-encode it.
 *
 * @param of           output file that ost belongs to
 * @param ost          video output stream to encode into
 * @param next_picture filtered frame to encode, or NULL when flushing at EOF
 * @param sync_ipts    frame timestamp expressed in enc->time_base units,
 *                     kept as a double for extra precision (see reap_filters)
 */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture,
                         double sync_ipts)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecParameters *mux_par = ost->st->codecpar;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    int frame_size = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Nominal duration of one output frame, in enc->time_base units. */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* With no filtering in between, prefer the decoded frame's own packet
     * duration over the filtergraph's nominal frame rate. */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        /* Estimate how often the last frame should be repeated from the
         * median of the recent duplication history. */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta  = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            /* Single-stream inputs without a ts offset can start the CFR
             * clock at the first frame instead of at timestamp zero. */
            if (   ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* A frame arriving earlier than expected (delta0 < 0) but whose
         * duration still overlaps the current slot (delta > 0) is clipped
         * forward so it starts exactly at the next output timestamp. */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            /* Skip leading duplicates: anchor the CFR clock at the first
             * real frame rather than padding from timestamp zero. */
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* Never exceed the -frames / recording time limits. */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* Shift the duplication history; consulted above when flushing at EOF. */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        /* Refuse to emit absurd duplication runs caused by broken timestamps. */
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        /* The first nb0_frames iterations re-encode the previous frame. */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
            return;

        if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
            ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;

        if (in_picture->interlaced_frame) {
            if (enc->codec->id == AV_CODEC_ID_MJPEG)
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
            else
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
        } else
            mux_par->field_order = AV_FIELD_PROGRESSIVE;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        /* Reference timestamp against which forced-keyframe times are measured. */
        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            ost->forced_kf_index++;
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            /* -force_key_frames expr: evaluate the user expression at this pts. */
            double res;
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
                               ost->forced_keyframes_expr_const_values, NULL);
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    ost->forced_keyframes_expr_const_values[FKF_N],
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                    ost->forced_keyframes_expr_const_values[FKF_T],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    res);
            if (res) {
                forced_keyframe = 1;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                    ost->forced_keyframes_expr_const_values[FKF_N];
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                    ost->forced_keyframes_expr_const_values[FKF_T];
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
            }

            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            /* -force_key_frames source: mirror the source's keyframes,
             * but never on a duplicated frame (!i). */
            forced_keyframe = 1;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        update_benchmark(NULL);
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }

        ost->frames_encoded++;

        ret = avcodec_send_frame(enc, in_picture);
        if (ret < 0)
            goto error;
        // Make sure Closed Captions will not be duplicated
        av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);

        /* Drain every packet the encoder has ready after this frame. */
        while (1) {
            ret = avcodec_receive_packet(enc, &pkt);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret == AVERROR(EAGAIN))
                break;
            if (ret < 0)
                goto error;

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
            }

            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt.pts = ost->sync_opts;

            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
            }

            frame_size = pkt.size;
            output_packet(of, &pkt, ost, 0);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* Remember this frame so the next call can duplicate it if needed. */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    exit_program(1);
}
1344
psnr(double d)1345 static double psnr(double d)
1346 {
1347 return -10.0 * log10(d);
1348 }
1349
/**
 * Append one line of per-frame encoding statistics to the -vstats file.
 * The file is opened lazily on the first call; the program exits if it
 * cannot be opened.  Only video streams produce output.
 *
 * @param ost        video output stream the encoded frame belongs to
 * @param frame_size size of the encoded frame in bytes
 */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 2 additionally records the file and stream index */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;

        /* Instantaneous bitrate assumes the frame lasts one encoder tick;
         * average bitrate is total bytes over elapsed stream time. */
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1392
1393 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1394
/* Mark a stream as fully finished (both encoder and muxer side).
 * Under -shortest, finishing any one stream finishes every stream
 * of the same output file. */
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (!of->shortest)
        return;

    for (int stream_idx = 0; stream_idx < of->ctx->nb_streams; stream_idx++) {
        OutputStream *sibling = output_streams[of->ost_index + stream_idx];
        sibling->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1407
1408 /**
1409 * Get and encode new output from any of the filtergraphs, without causing
1410 * activity.
1411 *
1412 * @return 0 for success, <0 for severe errors
1413 */
reap_filters(int flush)1414 static int reap_filters(int flush)
1415 {
1416 AVFrame *filtered_frame = NULL;
1417 int i;
1418
1419 /* Reap all buffers present in the buffer sinks */
1420 for (i = 0; i < nb_output_streams; i++) {
1421 OutputStream *ost = output_streams[i];
1422 OutputFile *of = output_files[ost->file_index];
1423 AVFilterContext *filter;
1424 AVCodecContext *enc = ost->enc_ctx;
1425 int ret = 0;
1426
1427 if (!ost->filter || !ost->filter->graph->graph)
1428 continue;
1429 filter = ost->filter->filter;
1430
1431 if (!ost->initialized) {
1432 char error[1024] = "";
1433 ret = init_output_stream(ost, error, sizeof(error));
1434 if (ret < 0) {
1435 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1436 ost->file_index, ost->index, error);
1437 exit_program(1);
1438 }
1439 }
1440
1441 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1442 return AVERROR(ENOMEM);
1443 }
1444 filtered_frame = ost->filtered_frame;
1445
1446 while (1) {
1447 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1448 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1449 AV_BUFFERSINK_FLAG_NO_REQUEST);
1450 if (ret < 0) {
1451 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1452 av_log(NULL, AV_LOG_WARNING,
1453 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1454 } else if (flush && ret == AVERROR_EOF) {
1455 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1456 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1457 }
1458 break;
1459 }
1460 if (ost->finished) {
1461 av_frame_unref(filtered_frame);
1462 continue;
1463 }
1464 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1465 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1466 AVRational filter_tb = av_buffersink_get_time_base(filter);
1467 AVRational tb = enc->time_base;
1468 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1469
1470 tb.den <<= extra_bits;
1471 float_pts =
1472 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1473 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1474 float_pts /= 1 << extra_bits;
1475 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1476 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1477
1478 filtered_frame->pts =
1479 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1480 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1481 }
1482
1483 switch (av_buffersink_get_type(filter)) {
1484 case AVMEDIA_TYPE_VIDEO:
1485 if (!ost->frame_aspect_ratio.num)
1486 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1487
1488 if (debug_ts) {
1489 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1490 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1491 float_pts,
1492 enc->time_base.num, enc->time_base.den);
1493 }
1494
1495 do_video_out(of, ost, filtered_frame, float_pts);
1496 break;
1497 case AVMEDIA_TYPE_AUDIO:
1498 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1499 enc->channels != filtered_frame->channels) {
1500 av_log(NULL, AV_LOG_ERROR,
1501 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1502 break;
1503 }
1504 do_audio_out(of, ost, filtered_frame);
1505 break;
1506 default:
1507 // TODO support subtitle filters
1508 av_assert0(0);
1509 }
1510
1511 av_frame_unref(filtered_frame);
1512 }
1513 }
1514
1515 return 0;
1516 }
1517
/**
 * Print the final end-of-run statistics: per-media-type encoded sizes and
 * muxing overhead at INFO level, plus per-stream packet/frame/byte counts
 * for every input and output file at VERBOSE level.
 *
 * @param total_size total number of bytes written to the (first) output,
 *                   used to derive the container overhead percentage
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* Accumulate payload sizes per media type across all output streams. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        /* pass1_used stays set only if every stream ran a pure first pass. */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        /* NOTE: deliberately shadows the parameter; per-file totals only. */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE: deliberately shadows the parameter; per-file totals only. */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1634
/**
 * Print the periodic status line (frame=... fps=... bitrate=...) and,
 * if -progress is active, write machine-readable key=value progress
 * information to progress_avio.  Called at most every 500ms, plus once
 * with is_last_report set at the end of the run.
 *
 * Fix: buf_script was only finalized inside the progress_avio branch,
 * leaking its heap buffer on every periodic report when no -progress
 * output is configured; it is now finalized unconditionally.
 *
 * @param is_last_report nonzero for the final report (also prints
 *                       the summary via print_final_stats())
 * @param timer_start    wall-clock start time in microseconds
 * @param cur_time       current wall-clock time in microseconds
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* Rate-limit intermediate reports to one per 500ms. */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* Only the first video stream drives frame/fps; later ones add q only. */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* Chroma planes are subsampled: a quarter of the samples. */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* Break the highest end pts down into hh:mm:ss.xx components. */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }
    /* Always release buf_script; previously it leaked when progress_avio
     * was NULL and the bprint had grown onto the heap. */
    av_bprint_finalize(&buf_script, NULL);

    if (is_last_report)
        print_final_stats(total_size);
}
1833
/* Seed a filtergraph input that never received any frames with the
 * stream parameters reported by libavformat, so the graph can still be
 * configured with a plausible (fake) format. */
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
{
    /* Common */
    ifilter->format              = par->format;
    /* Video */
    ifilter->width               = par->width;
    ifilter->height              = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
    /* Audio */
    ifilter->sample_rate         = par->sample_rate;
    ifilter->channels            = par->channels;
    ifilter->channel_layout      = par->channel_layout;
}
1846
/**
 * Drain all audio/video encoders at end of stream: send a NULL frame to
 * each encoder and mux every remaining delayed packet until AVERROR_EOF.
 *
 * Output streams that never received any data are lazily initialized
 * first (configuring their filtergraph from the demuxer parameters if
 * needed) so that the output file still contains a valid stream.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* Backfill unknown input formats from the demuxer so the
                 * graph can be configured at all. */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                if (!ifilter_has_all_input_formats(fg))
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* Only audio and video encoders need explicit draining here. */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            update_benchmark(NULL);

            /* EAGAIN on the first receive means the encoder has not been
             * put into draining mode yet; send the flush (NULL) frame. */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* Encoder fully drained: flush the bitstream filters/muxer. */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1959
1960 /*
1961 * Check whether a packet from ist should be written into ost at this time
1962 */
/* Decide whether a packet coming from ist may be written to ost right now. */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of       = output_files[ost->file_index];
    int global_ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* the output stream must actually be fed by this input stream */
    if (ost->source_index != global_ist_index)
        return 0;

    /* nothing more may be written to a finished stream */
    if (ost->finished)
        return 0;

    /* drop packets that precede the requested output start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1979
/*
 * Copy one packet from input stream ist to output stream ost without
 * re-encoding (-c copy).  A NULL pkt signals EOF and only flushes the
 * output bitstream filters.  Timestamps are shifted by the output start
 * time and rescaled from the input stream time base to the muxer time base.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    /* output start time in AV_TIME_BASE units; 0 when no start time was set */
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt;

    // EOF: flush output bitstream filters.
    if (!pkt) {
        av_init_packet(&opkt);
        opkt.data = NULL;
        opkt.size = 0;
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* drop leading non-keyframes unless the user asked to keep them */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* before the first output packet: drop anything before the start time
     * (compensated by the input start/offset when copy_ts is in effect) */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* stop copying once the output recording time limit is exceeded */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* likewise honour the per-input-file recording limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (av_packet_ref(&opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        /* no DTS in the packet: fall back to the input stream clock */
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* audio: rescale via sample counts so rounding errors do not
         * accumulate across packets */
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                    (AVRational){1, ist->dec_ctx->sample_rate}, duration,
                                    &ist->filter_in_rescale_delta_last, ost->mux_timebase);
        /* dts will be set immediately afterwards to what pts is now */
        opkt.pts = opkt.dts - ost_tb_start_time;
    } else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    output_packet(of, &opkt, ost, 0);
}
2056
/* Fill in a default channel layout for an input stream whose decoder did
 * not report one.  Returns 1 when a layout is available (already set or
 * successfully guessed), 0 when no guess could be made. */
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;

    if (dec->channel_layout)
        return 1;

    /* refuse to guess for more channels than the user allows */
    if (dec->channels > ist->guess_layout_max)
        return 0;

    dec->channel_layout = av_get_default_channel_layout(dec->channels);
    if (!dec->channel_layout)
        return 0;

    {
        char layout_name[256];

        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2076
/* Record decode success/failure statistics and react to errors or corrupt
 * frames according to the exit_on_error policy.  ist may be NULL (e.g. for
 * subtitles), in which case the corruption check is skipped. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    const int failed = ret < 0;

    if (*got_output || failed)
        decode_error_stat[failed]++;

    if (failed && exit_on_error)
        exit_program(1);

    if (!*got_output || !ist)
        return;

    /* a frame was produced: flag decoder-reported corruption */
    if (ist->decoded_frame->decode_error_flags ||
        (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt decoded frame in stream %d\n",
               input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
    }
}
2094
2095 // Filters can be configured only if the formats of all inputs are known.
ifilter_has_all_input_formats(FilterGraph * fg)2096 static int ifilter_has_all_input_formats(FilterGraph *fg)
2097 {
2098 int i;
2099 for (i = 0; i < fg->nb_inputs; i++) {
2100 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2101 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2102 return 0;
2103 }
2104 return 1;
2105 }
2106
/*
 * Feed one decoded frame into an input of a filtergraph.
 *
 * If the frame's parameters (format, dimensions, audio layout, hardware
 * frames context) differ from what the filtergraph was configured with,
 * the graph is (re)configured.  While not all graph inputs have known
 * formats yet, frames are buffered in the ifilter's FIFO instead.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* once the graph is running, only reconfigure if the user allows it */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* a change in (or appearance/disappearance of) the hardware frames
     * context always forces a reconfiguration */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* cannot configure yet: queue a clone of the frame */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                /* grow the FIFO when it is full */
                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* drain already-filtered frames before reconfiguring the graph */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2183
/* Signal EOF on one filtergraph input.  pts is the last known timestamp in
 * the input stream time base.  Returns 0 on success or a negative AVERROR. */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
{
    int ret;

    ifilter->eof = 1;

    if (!ifilter->filter) {
        /* the filtergraph was never configured: try to fill in the input
         * parameters from the stream so it still can be */
        if (ifilter->format < 0)
            ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
        if (ifilter->format < 0 &&
            (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot determine format of input stream %d:%d after EOF\n",
                   ifilter->ist->file_index, ifilter->ist->st->index);
            return AVERROR_INVALIDDATA;
        }
        return 0;
    }

    /* propagate EOF (with the closing timestamp) into the buffer source */
    ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0)
        return ret;

    return 0;
}
2206
2207 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2208 // There is the following difference: if you got a frame, you must call
2209 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2210 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
decode(AVCodecContext * avctx,AVFrame * frame,int * got_frame,AVPacket * pkt)2211 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2212 {
2213 int ret;
2214
2215 *got_frame = 0;
2216
2217 if (pkt) {
2218 ret = avcodec_send_packet(avctx, pkt);
2219 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2220 // decoded frames with avcodec_receive_frame() until done.
2221 if (ret < 0 && ret != AVERROR_EOF)
2222 return ret;
2223 }
2224
2225 ret = avcodec_receive_frame(avctx, frame);
2226 if (ret < 0 && ret != AVERROR(EAGAIN))
2227 return ret;
2228 if (ret >= 0)
2229 *got_frame = 1;
2230
2231 return 0;
2232 }
2233
/* Distribute one decoded frame to every filtergraph input fed by ist.
 * All but the last filter receive a new reference; the last one may
 * consume the frame itself.  Returns 0 or a negative AVERROR. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int ret;
    int i;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        AVFrame *to_send;

        if (i == ist->nb_filters - 1) {
            to_send = decoded_frame;
        } else {
            /* every filter except the last gets its own reference */
            to_send = ist->filter_frame;
            ret = av_frame_ref(to_send, decoded_frame);
            if (ret < 0)
                break;
        }

        ret = ifilter_send_frame(ist->filters[i], to_send);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n",
                   av_err2str(ret));
            break;
        }
    }
    return ret;
}
2259
/*
 * Decode one audio packet (or pull one more frame when pkt == NULL) and
 * forward the decoded frame to the filtergraphs.  *got_output is set when
 * a frame was produced; *decode_failed is set when the decoder itself
 * reported the error.  Returns 0/AVERROR from the decoder or the filters.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable frame buffers */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    /* a non-positive sample rate would corrupt every computation below */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* pick the best available timestamp source for the frame, in order of
     * preference: frame pts, packet pts, the input stream clock */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    /* rescale into the 1/sample_rate time base expected by the filters,
     * tracking sub-sample rounding with filter_in_rescale_delta_last */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2321
/*
 * Decode one video packet (or pull one more frame when pkt == NULL) and
 * forward the decoded frame to the filtergraphs.
 *
 * *got_output is set when a frame was produced, *duration_pts receives the
 * frame duration in stream time base units, and *decode_failed is set when
 * the decoder itself reported the error.  eof selects drain mode.
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* lazily allocate the reusable frame buffers */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the dts so drained frames can still get a timestamp */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* purely informational: report decoder-context vs frame mismatches */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    /* honour a user-forced field order (-top) */
    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from the hwaccel when required */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* with a forced input framerate (-r before -i) frames are simply counted */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* during draining, fall back to the dts values remembered above */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        /* consume the first queued dts (shift the rest down) */
        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2454
/*
 * Decode one subtitle packet, optionally repair overlapping durations
 * (-fix_sub_duration), feed the result to sub2video and re-encode it into
 * every matching subtitle output stream.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* an empty packet means EOF: flush the sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        /* clip the previous subtitle's display time so it ends when the
         * current one starts; the swaps below delay output by one subtitle */
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* emit the previous subtitle now, keep the current one for later */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* filters exist but sub2video is not set up yet: queue the subtitle
         * (ownership moves into the FIFO, so do not free it below) */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0;
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* re-encode into every subtitle output fed by this input stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2531
/* Propagate EOF to every filtergraph input fed by ist.
 * Returns 0 on success or the first negative AVERROR encountered. */
static int send_filter_eof(InputStream *ist)
{
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    int i;

    for (i = 0; i < ist->nb_filters; i++) {
        int ret = ifilter_send_eof(ist->filters[i], pts);

        if (ret < 0)
            return ret;
    }
    return 0;
}
2546
/*
 * Process one input packet: decode it (possibly producing several frames),
 * update the stream clocks (ist->pts/dts and their next_* predictions) and
 * hand packets to stream copy outputs.
 *
 * pkt = NULL means EOF (needed to flush decoder buffers); no_eof suppresses
 * sending EOF to the filters (used when looping the input).
 * Returns 0 once the stream is fully drained, non-zero otherwise.
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    /* initialize the stream clock from the first packet seen */
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    /* sync the stream clock to the packet dts when one is present */
    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* repeating: the packet was already sent, just pull more output */
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* derive a frame duration: packet duration first, then the
                 * decoder framerate (adjusted for repeated fields) */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* prefer the actual frame duration when the decoder gave one */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            /* a post-decode (filtering) error is always fatal; a decoder
             * error only with -xerror */
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        /* without a decoder, predict the next dts from the packet duration
         * or the stream/decoder frame rate */
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* copy the packet to every matching stream copy output */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2729
print_sdp(void)2730 static void print_sdp(void)
2731 {
2732 char sdp[16384];
2733 int i;
2734 int j;
2735 AVIOContext *sdp_pb;
2736 AVFormatContext **avc;
2737
2738 for (i = 0; i < nb_output_files; i++) {
2739 if (!output_files[i]->header_written)
2740 return;
2741 }
2742
2743 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2744 if (!avc)
2745 exit_program(1);
2746 for (i = 0, j = 0; i < nb_output_files; i++) {
2747 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2748 avc[j] = output_files[i]->ctx;
2749 j++;
2750 }
2751 }
2752
2753 if (!j)
2754 goto fail;
2755
2756 av_sdp_create(avc, j, sdp, sizeof(sdp));
2757
2758 if (!sdp_filename) {
2759 printf("SDP:\n%s\n", sdp);
2760 fflush(stdout);
2761 } else {
2762 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2763 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2764 } else {
2765 avio_print(sdp_pb, sdp);
2766 avio_closep(&sdp_pb);
2767 av_freep(&sdp_filename);
2768 }
2769 }
2770
2771 fail:
2772 av_freep(&avc);
2773 }
2774
/*
 * AVCodecContext.get_format callback: walk the decoder-offered pixel format
 * list and pick the first one for which the requested hwaccel can be
 * initialized, falling back to the first non-hwaccel format otherwise.
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* first software format in the list: stop searching and use it */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        /* look up a decoder hw config matching this format */
        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* failure is fatal only when this hwaccel was explicitly
                 * requested; with HWACCEL_AUTO keep trying other formats */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* no generic hw config: fall back to the legacy hwaccel table */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        /* propagate a user-supplied hardware frames context to the decoder */
        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2860
/* AVCodecContext.get_buffer2 callback: route allocation through the active
 * hwaccel when the frame uses its pixel format, otherwise use the default. */
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;

    if (!ist->hwaccel_get_buffer || frame->format != ist->hwaccel_pix_fmt)
        return avcodec_default_get_buffer2(s, frame, flags);

    return ist->hwaccel_get_buffer(s, frame, flags);
}
2870
/*
 * Open the decoder (when decoding is needed) for the input stream at
 * ist_index and initialize its prediction timestamps.  On failure, writes a
 * human-readable message into error/error_len and returns a negative
 * AVERROR code.
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* install our hwaccel-aware callbacks; opaque lets them find ist */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any options left over were not consumed by the decoder: abort */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2935
get_input_stream(OutputStream * ost)2936 static InputStream *get_input_stream(OutputStream *ost)
2937 {
2938 if (ost->source_index >= 0)
2939 return input_streams[ost->source_index];
2940 return NULL;
2941 }
2942
/* qsort() comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    return lhs > rhs ? 1 : 0;
}
2947
/* open the muxer when all the streams are initialized */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    /* the header can only be written once every stream's parameters are known */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        /* drain packets that were queued while the muxer was not yet open */
        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
2994
/* Configure and initialize the bitstream filter attached to @ost, then
 * propagate the (possibly rewritten) codec parameters and timebase from
 * the filter back onto the output stream.
 * Returns 0 on success or a negative AVERROR code. */
static int init_output_bsfs(OutputStream *ost)
{
    AVBSFContext *bsf = ost->bsf_ctx;
    int err;

    if (!bsf)
        return 0;

    /* feed the current stream parameters into the filter */
    err = avcodec_parameters_copy(bsf->par_in, ost->st->codecpar);
    if (err < 0)
        return err;
    bsf->time_base_in = ost->st->time_base;

    err = av_bsf_init(bsf);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
               bsf->filter->name);
        return err;
    }

    /* the filter may have altered extradata/parameters and the timebase */
    err = avcodec_parameters_copy(ost->st->codecpar, bsf->par_out);
    if (err < 0)
        return err;
    ost->st->time_base = bsf->time_base_out;

    return 0;
}
3023
/* Set up an output stream in streamcopy (-c copy) mode: no encoder is
 * opened; codec parameters, timing information and stream side data are
 * transferred from the source input stream onto the output stream.
 * Returns 0 on success or a negative AVERROR code. */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    /* streamcopy requires a source stream and must not go through a filtergraph */
    av_assert0(ist && !ost->filter);

    /* apply any user-supplied codec options on top of the input parameters */
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    /* keep the source codec_tag only when the output format can represent it */
    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    /* duplicate all stream-level side data (display matrix, CPB props, ...) */
    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    /* a user-requested rotation replaces any display matrix copied above */
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* these block_align values would mislead muxers for MP3/AC3 */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3137
/* Store an "encoder" metadata tag (e.g. "Lavc... libx264") on the output
 * stream, unless the user already provided one.  When bitexact output was
 * requested via format or codec flags, a version-less "Lavc" prefix is
 * used so the output stays reproducible. */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *tag;

    uint8_t *ident;
    int ident_len;
    int fmt_flags = 0;
    int enc_flags = ost->enc_ctx->flags;

    /* a user-provided tag always wins */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* pick up -fflags bitexact from the muxer options */
    tag = av_dict_get(of->opts, "fflags", NULL, 0);
    if (tag) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, tag->value, &fmt_flags);
    }
    /* and -flags bitexact from the encoder options */
    tag = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (tag) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, tag->value, &enc_flags);
    }

    ident_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    ident = av_mallocz(ident_len);
    if (!ident)
        exit_program(1);

    if (!(fmt_flags & AVFMT_FLAG_BITEXACT) && !(enc_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(ident, LIBAVCODEC_IDENT " ", ident_len);
    else
        av_strlcpy(ident, "Lavc ", ident_len);
    av_strlcat(ident, ost->enc->name, ident_len);
    /* the dictionary takes ownership of ident (AV_DICT_DONT_STRDUP_VAL) */
    av_dict_set(&ost->st->metadata, "encoder", ident,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
3178
/* Parse the -force_key_frames value @kf (a comma-separated list of
 * timestamps; a token may also be "chapters[+delta]" to force keyframes at
 * chapter starts) into a sorted array of forced-keyframe pts in
 * @avctx->time_base units, stored on @ost.
 * Modifies @kf in place (commas are replaced by NUL terminators) and exits
 * the program on allocation or parse failure. */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* one entry per comma-separated token */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        /* strncmp, not memcmp: a token shorter than 8 bytes (e.g. "chap")
         * would make memcmp read past its terminating NUL */
        if (!strncmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* grow the array so each chapter gets its own entry */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional "+delta" suffix shifts every chapter start */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3241
/* Choose the encoder timebase for @ost: an explicit -enc_time_base value
 * wins; a negative value means "copy from the input stream" (falling back
 * to @default_time_base with a warning when there is no input stream);
 * otherwise @default_time_base is used. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    AVCodecContext *enc = ost->enc_ctx;
    InputStream *ist;
    AVFormatContext *oc;

    /* explicit user-specified timebase */
    if (ost->enc_timebase.num > 0) {
        enc->time_base = ost->enc_timebase;
        return;
    }

    /* negative num requests the input stream's timebase */
    if (ost->enc_timebase.num < 0) {
        ist = get_input_stream(ost);
        if (ist) {
            enc->time_base = ist->st->time_base;
            return;
        }

        oc = output_files[ost->file_index]->ctx;
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }

    enc->time_base = default_time_base;
}
3265
/* Fill the encoder context of @ost from its configured filtergraph output
 * (and, where available, the source input stream): disposition, frame rate,
 * timebase and per-media-type parameters are chosen here, before the
 * encoder is opened.  Returns 0 on success or a negative AVERROR code. */
static int init_output_stream_encode(OutputStream *ost)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no source stream: if this is the only audio/video stream of its
         * kind in the file, mark it as the default one */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame rate priority: -r option, filtergraph, input -r, input
         * stream r_frame_rate, then a 25fps fallback */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        /* snap to the nearest frame rate the encoder supports (unless -force_fps) */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* audio format/rate/layout come from the filtergraph sink */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        /* geometry and pixel format come from the filtergraph sink */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* bits_per_raw_sample only survives when the frames pass through
         * unscaled and unconverted; otherwise use the user-supplied value */
        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        /* -top option: 0 = bottom field first, 1 = top field first */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* inherit the video canvas size from the source if not set */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3419
/* Fully initialize output stream @ost: open the encoder (or run streamcopy
 * setup), transfer side data, apply user dispositions, initialize bitstream
 * filters and, if this was the last uninitialized stream of its file, write
 * the file header.  On failure a message is written into @error/@error_len
 * and a negative AVERROR code is returned. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate when the encoder declares no defaults of its own */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* subtitle transcoding is only supported within the same family
         * (text->text or bitmap->bitmap) */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders dictate the buffersink frame size */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        /* fail loudly on any encoder option that was not consumed */
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* expose encoder-generated global side data (e.g. CPB props) on the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* autorotate was applied by the filtergraph, so zero the matrix.
                     * NOTE(review): av_display_rotation_set() documents an
                     * int32_t[9] matrix; the uint32_t cast here looks suspect —
                     * confirm against libavutil/display.h */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* local flags table so av_opt_eval_flags can parse "-disposition" strings */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* this may have been the last stream needed to open the muxer */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3609
/* Log a one-time warning when a packet arrives for a stream that appeared
 * after the initial probe of input file @input_index. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *ifile = input_files[input_index];
    AVStream *stream = ifile->ctx->streams[pkt->stream_index];

    /* already warned about this stream index (or a later one) */
    if (pkt->stream_index < ifile->nb_streams_warn)
        return;

    av_log(ifile->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(stream->codecpar->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &stream->time_base));
    ifile->nb_streams_warn = pkt->stream_index + 1;
}
3624
/* One-time setup before the transcode main loop: resolve filtergraph-fed
 * output streams to their source inputs, initialize all input streams
 * (opening decoders), open encoders for directly-fed output streams,
 * discard unused programs, write headers for stream-less outputs, and
 * finally print the stream mapping.  Returns 0 on success or a negative
 * AVERROR code (after logging the accumulated error message). */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* for single-input filtergraphs, point the output stream at the
     * filtergraph's source input stream */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* roll back any encoders already opened */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep the program if any of its streams is still used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        /* inputs feeding complex filtergraphs */
        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            /* show "native" when the decoder/encoder name matches the codec
             * descriptor name (i.e. it is FFmpeg's built-in implementation) */
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
3796
3797 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
need_output(void)3798 static int need_output(void)
3799 {
3800 int i;
3801
3802 for (i = 0; i < nb_output_streams; i++) {
3803 OutputStream *ost = output_streams[i];
3804 OutputFile *of = output_files[ost->file_index];
3805 AVFormatContext *os = output_files[ost->file_index]->ctx;
3806
3807 if (ost->finished ||
3808 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3809 continue;
3810 if (ost->frame_number >= ost->max_frames) {
3811 int j;
3812 for (j = 0; j < of->ctx->nb_streams; j++)
3813 close_output_stream(output_streams[of->ost_index + j]);
3814 continue;
3815 }
3816
3817 return 1;
3818 }
3819
3820 return 0;
3821 }
3822
3823 /**
3824 * Select the output stream to process.
3825 *
3826 * @return selected output stream, or NULL if none available
3827 */
choose_output(void)3828 static OutputStream *choose_output(void)
3829 {
3830 int i;
3831 int64_t opts_min = INT64_MAX;
3832 OutputStream *ost_min = NULL;
3833
3834 for (i = 0; i < nb_output_streams; i++) {
3835 OutputStream *ost = output_streams[i];
3836 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3837 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3838 AV_TIME_BASE_Q);
3839 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3840 av_log(NULL, AV_LOG_DEBUG,
3841 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3842 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3843
3844 if (!ost->initialized && !ost->inputs_done)
3845 return ost;
3846
3847 if (!ost->finished && opts < opts_min) {
3848 opts_min = opts;
3849 ost_min = ost->unavailable ? NULL : ost;
3850 }
3851 }
3852 return ost_min;
3853 }
3854
/* Enable (on != 0) or disable terminal echo on stdin; no-op without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3866
/*
 * Poll for and act on interactive keyboard commands.
 *
 * Handled keys: 'q' quit, '+'/'-' verbosity, 's' QP histogram,
 * 'h' packet/hex dump cycling, 'c'/'C' filter commands,
 * 'd'/'D' debug flags, '?' help.
 *
 * @param cur_time current time from av_gettime_relative(), used to
 *                 rate-limit key polling to once per 100ms
 * @return AVERROR_EXIT to request termination, 0 otherwise
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    /* Poll at most every 100ms, and never in daemon mode. */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h'){
        /* Cycle: packet dump -> packet+hex dump -> off. */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    if (key == 'c' || key == 'C'){
        /* Read one command line from the terminal and forward it to all
         * configured filtergraphs.  'c' sends to the first matching filter
         * only (AVFILTER_CMD_FLAG_ONE); 'C' sends/queues to all. */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* time < 0: execute immediately; buf receives the reply. */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* time >= 0: queue the command for later execution. */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D' cycles to the next debug flag, skipping flags known to crash. */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                    |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                    )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd' reads an explicit debug value from the terminal. */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* Apply the new debug flags to all decoders and encoders. */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
3983
3984 #if HAVE_THREADS
/*
 * Per-input-file reader thread: demuxes packets from f->ctx and feeds
 * them to the main thread through f->in_thread_queue.  Exits on read
 * error/EOF or when the queue is shut down, after propagating the error
 * to the receiving side.
 */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* Start non-blocking if requested; may be downgraded to blocking below. */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* EOF or read error: make the consumer see it and stop. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Queue is full: permanently switch to blocking sends and
             * warn the user that thread_queue_size may be too small. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            /* The packet was not consumed by the queue; free it here. */
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4025
free_input_thread(int i)4026 static void free_input_thread(int i)
4027 {
4028 InputFile *f = input_files[i];
4029 AVPacket pkt;
4030
4031 if (!f || !f->in_thread_queue)
4032 return;
4033 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4034 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4035 av_packet_unref(&pkt);
4036
4037 pthread_join(f->thread, NULL);
4038 f->joined = 1;
4039 av_thread_message_queue_free(&f->in_thread_queue);
4040 }
4041
free_input_threads(void)4042 static void free_input_threads(void)
4043 {
4044 int i;
4045
4046 for (i = 0; i < nb_input_files; i++)
4047 free_input_thread(i);
4048 }
4049
/*
 * Set up the reader thread and message queue for input file i.
 *
 * @return 0 on success (including when threading is disabled for this
 *         file), a negative AVERROR code on failure
 */
static int init_input_thread(int i)
{
    int ret;
    InputFile *f = input_files[i];

    /* Default queue size: 8 packets when reading several files, 0 (no
     * thread at all) for a single input. */
    if (f->thread_queue_size < 0)
        f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
    if (!f->thread_queue_size)
        return 0;

    /* Non-blocking mode for non-seekable inputs, or for any format other
     * than lavfi when no AVIOContext exists — presumably to avoid stalling
     * on live sources; confirm against upstream history. */
    if (f->ctx->pb ? !f->ctx->pb->seekable :
        strcmp(f->ctx->iformat->name, "lavfi"))
        f->non_blocking = 1;
    ret = av_thread_message_queue_alloc(&f->in_thread_queue,
                                        f->thread_queue_size, sizeof(AVPacket));
    if (ret < 0)
        return ret;

    if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
        av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
        /* pthread_create returns a positive errno value; free the queue
         * before reporting the error. */
        av_thread_message_queue_free(&f->in_thread_queue);
        return AVERROR(ret);
    }

    return 0;
}
4076
init_input_threads(void)4077 static int init_input_threads(void)
4078 {
4079 int i, ret;
4080
4081 for (i = 0; i < nb_input_files; i++) {
4082 ret = init_input_thread(i);
4083 if (ret < 0)
4084 return ret;
4085 }
4086 return 0;
4087 }
4088
/* Receive one packet from the reader thread's queue of input file f. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;

    return av_thread_message_queue_recv(f->in_thread_queue, pkt, flags);
}
4095 #endif
4096
/* Fetch the next packet of input file f, honouring rate emulation. */
static int get_input_packet(InputFile *f, AVPacket *pkt)
{
    if (f->rate_emu) {
        int idx;

        /* With rate emulation, refuse to read until wall-clock time has
         * caught up with every stream's current DTS. */
        for (idx = 0; idx < f->nb_streams; idx++) {
            InputStream *ist = input_streams[f->ist_index + idx];
            int64_t pts     = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t elapsed = av_gettime_relative() - ist->start;

            if (pts > elapsed)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4116
got_eagain(void)4117 static int got_eagain(void)
4118 {
4119 int i;
4120 for (i = 0; i < nb_output_streams; i++)
4121 if (output_streams[i]->unavailable)
4122 return 1;
4123 return 0;
4124 }
4125
reset_eagain(void)4126 static void reset_eagain(void)
4127 {
4128 int i;
4129 for (i = 0; i < nb_input_files; i++)
4130 input_files[i]->eagain = 0;
4131 for (i = 0; i < nb_output_streams; i++)
4132 output_streams[i]->unavailable = 0;
4133 }
4134
4135 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/*
 * Set *duration to max(tmp, *duration), comparing across time bases,
 * and return the time base that *duration is expressed in afterwards.
 * A zero *duration is treated as "unset" and is always replaced.
 */
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
                               AVRational time_base)
{
    /* Short-circuit: when *duration is unset, av_compare_ts is skipped,
     * matching the original two-step logic. */
    if (!*duration ||
        av_compare_ts(*duration, time_base, tmp, tmp_time_base) < 0) {
        *duration = tmp;
        return tmp_time_base;
    }

    return time_base;
}
4154
/*
 * Seek an input file back to its start for the -stream_loop option, and
 * accumulate the file duration into ifile->duration so the next loop
 * iteration can offset its timestamps.
 *
 * @return 0 on success, negative AVERROR code if the seek failed
 */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* First pass: detect whether any audio stream produced samples. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* Second pass: estimate the last-frame duration per stream and fold
     * each stream's total duration into ifile->duration. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                /* Last audio frame length is exact: nb_samples / sample_rate. */
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* No audio: approximate the last video frame length from the
             * forced or average frame rate, falling back to one tick. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* A positive loop count is decremented; negative means loop forever. */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4213
4214 /*
4215 * Return
4216 * - 0 -- one packet was read and processed
4217 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4218 * this function should be called again
4219 * - AVERROR_EOF -- this function should not be called again
4220 */
/*
 * Read one packet from the given input file, apply timestamp corrections
 * (wrap, offset, scaling, discontinuity handling, -stream_loop offsets)
 * and hand it to process_input_packet().
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    int disable_discontinuity_correction = copy_ts;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* -stream_loop: on EOF, flush the decoders, seek back to the start
     * and retry the read.  The reader thread must be stopped across the
     * seek and restarted afterwards. */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        /* Real EOF (or error): flush decoders, finish the outputs fed
         * directly from this file, and mark the file done. */
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* Bookkeeping for the final statistics report. */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* Timestamp wrap correction for streams with narrow pts_wrap_bits. */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* Unwrap dts/pts that already wrapped past the counter width; stay
         * in correction mode until both are seen unwrapped. */
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* Do not overwrite side data the packet already carries. */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* Apply the per-file timestamp offset, then the -itsscale factor. */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* Initial inter-stream discontinuity correction, keyed off the last
     * timestamp seen for this file. */
    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta   = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* Shift by the accumulated loop duration (nonzero only with -stream_loop). */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* Even with copy_ts, re-enable discontinuity correction when the dts
     * looks like a genuine wrap of the pts counter. */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* Per-stream discontinuity handling against the predicted next dts. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            /* Formats with expected discontinuities: absorb the jump into
             * the file's ts_offset. */
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* Other formats: drop wildly out-of-range timestamps instead. */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4501
4502 /**
4503 * Perform a step of transcoding for the specified filter graph.
4504 *
4505 * @param[in] graph filter graph to consider
4506 * @param[out] best_ist input stream where a frame would allow to continue
4507 * @return 0 for success, <0 for error
4508 */
/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * Asks the graph for output first; when the graph instead needs input
 * (EAGAIN), selects the input stream whose buffersrc reported the most
 * failed requests as the best one to feed next.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream where a frame would allow to continue
 * @return  0 for success, <0 for error
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        /* The graph produced something: drain it without blocking. */
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        /* Graph fully flushed: reap remaining frames and close outputs. */
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* EAGAIN: the graph wants input.  Pick the live input stream with the
     * highest number of failed buffersrc requests. */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* No input can make progress: mark the graph's outputs unavailable. */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4549
4550 /**
4551 * Run a single step of transcoding.
4552 *
4553 * @return 0 for success, <0 for error
4554 */
/**
 * Run a single step of transcoding.
 *
 * Chooses an output stream, (re)configures and initializes its filter
 * graph if needed, determines which input stream to read from, and
 * processes one input packet.
 *
 * @return  0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        /* No selectable output: if it is only a transient EAGAIN, clear
         * the markers and retry later; otherwise we are done. */
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* Configure the filter graph once all its inputs know their format. */
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /* Graph is live: initialize the output stream if needed, then let
         * the graph pick which input stream should be fed next. */
        if (!ost->initialized) {
            char error[1024] = {0};
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Graph not configured yet: feed any input that has not produced
         * output and whose file is not exhausted. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* No filtering (streamcopy): read from the direct source stream. */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4626
4627 /*
4628 * The following code is the main loop of the file converter
4629 */
/*
 * The following code is the main loop of the file converter
 *
 * Drives transcode_step() until all outputs are satisfied or an error or
 * user interrupt occurs, then flushes decoders/encoders, writes trailers
 * and releases per-stream resources.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* Main loop: one transcode step per iteration, until SIGTERM, 'q',
     * no remaining output, or a fatal error. */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            /* Never call av_write_trailer() on a file whose header was
             * never written. */
            av_log(NULL, AV_LOG_ERROR,
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->url);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    hw_device_free_all();

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    /* Idempotent: safe even when already called on the success path. */
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4773
/*
 * Sample real, user-CPU and system-CPU time for -benchmark reporting.
 * All fields are in microseconds; user/sys are 0 when the platform
 * provides neither getrusage() nor GetProcessTimes().
 */
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
{
    BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    time_stamps.user_usec =
        (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
    time_stamps.sys_usec =
        (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME is in 100ns units; divide by 10 for microseconds. */
    time_stamps.user_usec =
        ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
    time_stamps.sys_usec =
        ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
#else
    time_stamps.user_usec = time_stamps.sys_usec = 0;
#endif
    return time_stamps;
}
4799
/*
 * Return the peak memory usage of this process in bytes, or 0 when the
 * platform offers no way to query it.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4817
/* No-op log callback: installed via av_log_set_callback() when running as a
 * daemon (-d flag in main()) so that all library log output is discarded. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4821
/**
 * ffmpeg entry point: initialize libraries, parse the command line into
 * input/output files, run the transcode loop, and report benchmark and
 * decode-error statistics before exiting via exit_program().
 */
int main(int argc, char **argv)
{
    int i, ret;
    BenchmarkTimeStamps ti;

    init_dynload();

    /* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* Apply -loglevel early so option parsing itself logs at the right level. */
    parse_loglevel(argc, argv, options);

    /* "-d" must be the first argument: run detached, silencing all log
     * output, then hide the flag from the regular option parser. */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    /* No files at all: the user probably wants help, not an error. */
    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

    /* An SDP description is only wanted when every output muxer is RTP. */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    /* Record start times, run the main transcode loop, then print the
     * user/system/real time deltas if -benchmark was requested. */
    current_time = ti = get_benchmark_time_stamps();
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        int64_t utime, stime, rtime;
        current_time = get_benchmark_time_stamps();
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        av_log(NULL, AV_LOG_INFO,
               "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
               utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* Exit 69 when the fraction of failed decodes exceeds -max_error_rate. */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    /* exit_program() does not return; 255 signals interruption by the user. */
    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
4893