1 // LiVES - libav stream engine
2 // (c) G. Finch 2017 <salsaman+lives@gmail.com>
3 // released under the GNU GPL 3 or later
4 // see file COPYING or www.gnu.org for details
5
6 #include <stdio.h>
7 #include <pthread.h>
8
9 #include <libavformat/avformat.h>
10 #include <libavcodec/avcodec.h>
11 #include <libavcodec/version.h>
12 #include <libavutil/samplefmt.h>
13 #include <libavutil/mathematics.h>
14
15 #include <libavutil/avassert.h>
16 #include <libavutil/channel_layout.h>
17 #include <libavutil/opt.h>
18 #include <libavutil/timestamp.h>
19 #include <libswscale/swscale.h>
20 #include <libswresample/swresample.h>
21
22 #include "videoplugin.h"
23
24 #define HAVE_AVUTIL
25 #define HAVE_AVCODEC
26
27 #ifdef HAVE_SYSTEM_WEED
28 #include <weed/weed-compat.h>
29 #else
30 #include "../../../../libweed/weed-compat.h"
31 #endif
32
33 #include "../../decoders/libav_helper.h"
34
static int intent;  // LIVES_INTENTION_* value passed to get_init_rfx()

static int mypalette = WEED_PALETTE_END;  // palette chosen by the host via set_palette()
static int palette_list[3];  // filled by get_palette_list(), WEED_PALETTE_END terminated

static int avpalette;  // libav pixel format the encoder receives (derived from YUV420P + clamping)

static int clampings[3];  // YUV clamping list returned by get_yuv_palette_clamping(), -1 terminated
static int myclamp = WEED_YUV_CLAMPING_UNCLAMPED;

static char plugin_version[64] = "LiVES libav stream engine version 1.1";

// per-frame dispatch target installed by set_palette(); invoked from render_frame()
static boolean(*render_fn)(int hsize, int vsize, void **pixel_data);

static boolean render_frame_yuv420(int hsize, int vsize, void **pixel_data);
static boolean render_frame_unknown(int hsize, int vsize, void **pixel_data);

static int ovsize, ohsize;  // input frame size last seen by get_video_frame()
static int in_nchans, out_nchans;  // audio channels: host side / encoder side
static int in_sample_rate, out_sample_rate;  // audio rates: host side / encoder side
static int in_nb_samples, out_nb_samples;  // resampler input size / encoder frame size
static int maxabitrate, maxvbitrate;  // max audio / video bitrates from the RFX params

// holds partial encoder frames of audio between render_audio_frame_float() calls
static float **spill_buffers;
static int spb_len;  // number of valid samples currently in spill_buffers

static double target_fps;

// serializes av_interleaved_write_frame() between the audio and video paths
static pthread_mutex_t write_mutex;
65 /////////////////////////////////////////////////////////////////////////
66
static AVFormatContext *fmtctx;  // output container context
static AVCodecContext *encctx, *aencctx;  // video / audio encoder contexts
static AVStream *vStream, *aStream;  // video / audio streams inside fmtctx

#define DEFAULT_FRAME_RATE 10. /* 10 images/s */
#define SCALE_FLAGS SWS_BICUBIC

// TRUE when streaming over the network (UDP), FALSE when writing a file
boolean stream_encode;

// a wrapper around a single output AVStream
typedef struct OutputStream {
  AVStream *st;
  AVCodecContext *enc;
  AVCodec *codec;
  /* pts of the next frame that will be generated */
  int64_t next_pts;
  int samples_count;  // total audio samples handed to the encoder (drives audio pts)
  AVFrame *frame;     // reusable frame passed to the encoder
  AVFrame *tmp_frame;  // NOTE(review): appears unused in this file
  float t, tincr, tincr2;  // NOTE(review): appears unused in this file
  struct SwsContext *sws_ctx;  // video scaler / pixel-format converter
  struct SwrContext *swr_ctx;  // audio resampler
} OutputStream;

static OutputStream ostv; // video
static OutputStream osta; // audio

static int inited = 0;  // set by module_check_init(); checked in module_unload()
95
96 //////////////////////////////////////////////
97
// two-step stringification: STR(DEFAULT_FRAME_RATE) expands to "10."
#define STR_EXPAND(tok) #tok
#define STR(tok) STR_EXPAND(tok)

// Host entry point: list of frame rates supported for the given palette.
// Only the (stringified) default rate is advertised, whatever the palette.
const char *get_fps_list(int palette) {
  return STR(DEFAULT_FRAME_RATE);
}
104
105 ////////////////////////////////////////////////
106
module_check_init(void)107 const char *module_check_init(void) {
108 render_fn = &render_frame_unknown;
109 ovsize = ohsize = 0;
110
111 fmtctx = NULL;
112 encctx = NULL;
113
114 av_register_all();
115 avformat_network_init();
116
117 target_fps = DEFAULT_FRAME_RATE;
118
119 in_sample_rate = 0;
120
121 intent = 0;
122
123 pthread_mutex_init(&write_mutex, NULL);
124 inited = 1;
125
126 return NULL;
127 }
128
129
version(void)130 const char *version(void) {
131 return plugin_version;
132 }
133
134
// Host entry point: one-line human-readable plugin description.
const char *get_description(void) {
  static const char *desc =
    "The libav_stream plugin provides realtime streaming over a local network (UDP)\n";
  return desc;
}
138
139
get_palette_list(void)140 const int *get_palette_list(void) {
141 palette_list[0] = WEED_PALETTE_RGB24;
142 palette_list[1] = WEED_PALETTE_YUV420P;
143 palette_list[2] = WEED_PALETTE_END;
144 return palette_list;
145 }
146
147
get_capabilities(int palette)148 uint64_t get_capabilities(int palette) {
149 return 0;//VPP_CAN_RESIZE;
150 }
151
152
153 /*
154 parameter template, these are returned as argc, argv in init_screen() and init_audio()
155 */
// Host entry point: return the RFX parameter template for the given
// intention. The host builds its parameter dialog from this template and
// passes the user's values back as argc / argv in init_screen() and
// init_audio(). NB: the template strings are whitespace-sensitive - the
// line continuations splice directly into the string literal.
const char *get_init_rfx(int intention) {
  // intention allows switching between different tailored interfaces
  intent = intention;

  switch (intent) {
  case LIVES_INTENTION_PLAY: // for now...
  case LIVES_INTENTION_STREAM: // LiVES VPP (streaming output)
    // streaming UI: format, bitrates, audio setup, target IP/port
    return
      "<define>\\n\
|1.7\\n\
</define>\\n\
<params>\\n\
form|_Format|string_list|0|mp4/h264/aac|ogm/theora/vorbis||\\n\
\
mbitv|Max bitrate (_video)|num0|500000|100000|1000000000|\\n\
\
achans|Audio _layout|string_list|1|mono|stereo||\\n\
arate|Audio _rate (Hz)|string_list|1|22050|44100|48000||\\n\
mbita|Max bitrate (_audio)|num0|320000|16000|10000000|\\n\
\
ip1|_Address to stream to|string|127|3|\\n\
ip2||string|0|3|\\n\
ip3||string|0|3|\\n\
ip4||string|1|3|\\n\
port|_port|num0|8000|1024|65535|\\n\
</params>\\n\
<param_window>\\n\
layout|\\\"Enter an IP address and port to stream to LiVES output to.\\\"|\\n\
layout|\\\"You can play the stream on the remote / local machine with e.g:\\\"|\\n\
layout|\\\"mplayer udp://127.0.0.1:8000\\\"| \\n\
layout|\\\"You are advised to start with a small frame size and low framerate,\\\"|\\n\
layout|\\\"and increase this if your network bandwidth allows it.\\\"|\\n\
layout|p0||\\n\
layout|p1||\\n\
layout|p2||\\n\
layout|p3||\\n\
layout|p4||\\n\
layout|p5|\\\".\\\"|p6|\\\".\\\"|p7|\\\".\\\"|p8|fill|fill|fill|fill|\\n\
</param_window>\\n\
<onchange>\\n\
</onchange>\\n\
";

  case LIVES_INTENTION_TRANSCODE: // LiVES transcoding (test)
    // transcode UI: format, bitrates, audio setup, output file, quality;
    // the onchange hooks keep the filename extension in sync with the format
    return
      "<define>\\n\
|1.8.1\\n\
</define>\\n\
<language_code>\\n\
0xF0\\n\
</language_code>\\n\
<params>\\n\
form|_Format|string_list|0|mp4/h264/aac|ogm/theora/vorbis|webm/vp9/opus||\\n\
\
mbitv|Max bitrate (_video)|num0|500000|100000|1000000000|\\n\
\
achans|Audio _layout|string_list|1|mono|stereo||\\n\
arate|Audio _rate (Hz)|string_list|1|22050|44100|48000||\\n\
mbita|Max bitrate (_audio)|num0|320000|16000|10000000|\\n\
\
fname|_Output file|string||\\n\
highq|_High quality (larger file size)|bool|0|0|\\n\
</params>\\n\
<param_window>\\n\
special|filewrite|5|\\n\
layout|hseparator|\\n\
layout|p5|\\n\
layout|p6|\\n\
layout|p0|\\n\
layout|hseparator|\\n\
</param_window>\\n\
<onchange>\\n\
init|$p5 = (split(/\\./,$p5))[0]; if ($p0 == 0) {$p5 .= \".mp4\";} elsif ($p0 == 2) {$p5 .= \".webm\";} else {$p5 .= \".ogm\";}\\n\
0|$p5 = (split(/\\./,$p5))[0]; if ($p0 == 0) {$p5 .= \".mp4\";} elsif ($p0 == 2) {$p5 .= \".webm\";} else {$p5 .= \".ogm\";}\\n\
</onchange>\\n\
";
  default:
    // unknown intention: no parameters
    return "";
  }
}
236
237
get_yuv_palette_clamping(int palette)238 const int *get_yuv_palette_clamping(int palette) {
239 if (palette == WEED_PALETTE_YUV420P) {
240 clampings[0] = WEED_YUV_CLAMPING_UNCLAMPED;
241 clampings[1] = -1;
242 } else clampings[0] = -1;
243 return clampings;
244 }
245
246
set_yuv_palette_clamping(int clamping_type)247 boolean set_yuv_palette_clamping(int clamping_type) {
248 myclamp = clamping_type;
249 avpalette = weed_palette_to_avi_pix_fmt(WEED_PALETTE_YUV420P, &myclamp);
250 return TRUE;
251 }
252
253
set_palette(int palette)254 boolean set_palette(int palette) {
255 mypalette = palette;
256 render_fn = &render_frame_yuv420;
257 avpalette = weed_palette_to_avi_pix_fmt(WEED_PALETTE_YUV420P, &myclamp);
258 return TRUE;
259 }
260
261
set_fps(double in_fps)262 boolean set_fps(double in_fps) {
263 target_fps = in_fps;
264 return TRUE;
265 }
266
267
alloc_picture(enum AVPixelFormat pix_fmt,int width,int height)268 static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) {
269 AVFrame *picture;
270 int ret;
271 picture = av_frame_alloc();
272 if (!picture)
273 return NULL;
274 picture->format = pix_fmt;
275 picture->width = width;
276 picture->height = height;
277 /* allocate the buffers for the frame data */
278 ret = av_frame_get_buffer(picture, 32);
279 if (ret < 0) {
280 fprintf(stderr, "Could not allocate frame data.\n");
281 return NULL;
282 }
283 return picture;
284 }
285
286
alloc_audio_frame(enum AVSampleFormat sample_fmt,uint64_t channel_layout,int sample_rate,int nb_samples)287 static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
288 uint64_t channel_layout,
289 int sample_rate, int nb_samples) {
290 AVFrame *frame = av_frame_alloc();
291 int ret;
292
293 if (!frame) {
294 fprintf(stderr, "Error allocating an audio frame\n");
295 return NULL;
296 }
297
298 frame->format = sample_fmt;
299 frame->channel_layout = channel_layout;
300 frame->sample_rate = sample_rate;
301 frame->nb_samples = nb_samples;
302
303 ret = av_frame_get_buffer(frame, 0);
304 if (ret < 0) {
305 fprintf(stderr, "Error allocating an audio buffer\n");
306 return NULL;
307 }
308
309 return frame;
310 }
311
312
open_audio()313 static boolean open_audio() {
314 AVCodecContext *c;
315 AVCodec *codec;
316 AVDictionary *opt = NULL;
317 int ret;
318 int i;
319
320 codec = osta.codec;
321 c = osta.enc;
322
323 c->sample_fmt = AV_SAMPLE_FMT_FLTP;
324 if (codec->sample_fmts) {
325 c->sample_fmt = codec->sample_fmts[0];
326 for (i = 0; codec->sample_fmts[i]; i++) {
327 if (codec->sample_fmts[i] == AV_SAMPLE_FMT_FLTP) {
328 c->sample_fmt = AV_SAMPLE_FMT_FLTP;
329 break;
330 }
331 }
332 }
333
334 c->sample_rate = out_sample_rate;
335 if (codec->supported_samplerates) {
336 c->sample_rate = codec->supported_samplerates[0];
337 for (i = 0; codec->supported_samplerates[i]; i++) {
338 if (codec->supported_samplerates[i] == out_sample_rate) {
339 c->sample_rate = out_sample_rate;
340 break;
341 }
342 }
343 }
344 out_sample_rate = c->sample_rate;
345
346 c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
347 c->channel_layout = (out_nchans == 2 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO);
348 if (codec->channel_layouts) {
349 c->channel_layout = codec->channel_layouts[0];
350 for (i = 0; codec->channel_layouts[i]; i++) {
351 if (codec->channel_layouts[i] == (out_nchans == 2 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO)) {
352 c->channel_layout = (out_nchans == 2 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO);
353 break;
354 }
355 }
356 }
357 c->channels = out_nchans = av_get_channel_layout_nb_channels(c->channel_layout);
358
359 c->bit_rate = maxabitrate;
360 c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
361 ret = avcodec_open2(c, codec, &opt);
362 if (ret < 0) {
363 fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
364 return FALSE;
365 }
366
367 if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) {
368 fprintf(stderr, "varaudio\n");
369 } else {
370 out_nb_samples = c->frame_size;
371 fprintf(stderr, "nb samples is %d\n", out_nb_samples);
372 }
373
374 /* create resampler context */
375 osta.swr_ctx = swr_alloc();
376 if (!osta.swr_ctx) {
377 fprintf(stderr, "Could not allocate resampler context\n");
378 return FALSE;
379 }
380
381 /* set options */
382 av_opt_set_int(osta.swr_ctx, "in_channel_count", in_nchans, 0);
383 av_opt_set_int(osta.swr_ctx, "in_sample_rate", in_sample_rate, 0);
384 av_opt_set_sample_fmt(osta.swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
385 av_opt_set_int(osta.swr_ctx, "out_channel_count", c->channels, 0);
386 av_opt_set_int(osta.swr_ctx, "out_sample_rate", c->sample_rate, 0);
387 av_opt_set_sample_fmt(osta.swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
388
389 /* initialize the resampling context */
390 if ((ret = swr_init(osta.swr_ctx)) < 0) {
391 fprintf(stderr, "Failed to initialize the resampling context\n");
392 fprintf(stderr, "%d %d - %d %d %d\n", in_nchans, in_sample_rate, c->channels, c->sample_rate, c->sample_fmt);
393 return FALSE;
394 }
395
396 in_nb_samples = out_nb_samples;
397 if (out_nb_samples != 0) {
398 /* compute src number of samples */
399 in_nb_samples = av_rescale_rnd(swr_get_delay(osta.swr_ctx, c->sample_rate) + out_nb_samples,
400 in_sample_rate, c->sample_rate, AV_ROUND_UP);
401
402 /* confirm destination number of samples */
403 int dst_nb_samples = av_rescale_rnd(in_nb_samples,
404 c->sample_rate, in_sample_rate, AV_ROUND_DOWN);
405
406 av_assert0(dst_nb_samples == out_nb_samples);
407 }
408
409 if (out_nb_samples > 0)
410 osta.frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, out_nb_samples);
411 else
412 osta.frame = NULL;
413
414 spill_buffers = NULL;
415
416 if (in_nb_samples != 0) {
417 spill_buffers = (float **) malloc(in_nchans * sizeof(float *));
418 for (i = 0; i < in_nchans; i++) {
419 spill_buffers[i] = (float *) malloc(in_nb_samples * sizeof(float));
420 }
421 }
422 spb_len = 0;
423
424 osta.samples_count = 0;
425
426 osta.st->time_base = (AVRational) {
427 1, c->sample_rate
428 };
429
430 fprintf(stderr, "Opened audio stream\n");
431 fprintf(stderr, "%d %d - %d %d %d\n", in_nchans, in_sample_rate, c->channels, c->sample_rate, c->sample_fmt);
432 return TRUE;
433 }
434
435
add_stream(OutputStream * ost,AVFormatContext * oc,AVCodec ** codec,enum AVCodecID codec_id)436 static boolean add_stream(OutputStream *ost, AVFormatContext *oc,
437 AVCodec **codec,
438 enum AVCodecID codec_id) {
439 AVCodecContext *c;
440
441 *codec = avcodec_find_encoder(codec_id);
442 if (!(*codec)) {
443 fprintf(stderr, "Could not find encoder for '%s'\n",
444 #ifdef HAVE_AVCODEC_GET_NAME
445 avcodec_get_name(codec_id)
446 #else
447 ((AVCodec *)(*codec))->name
448 #endif
449 );
450 return FALSE;
451 }
452
453 c = avcodec_alloc_context3(*codec);
454 if (!c) {
455 fprintf(stderr, "Could not allocate video / audio codec context\n");
456 return FALSE;
457 }
458
459 ost->st = avformat_new_stream(oc, *codec); // stream(s) created from format_ctx and codec
460 if (!ost->st) {
461 fprintf(stderr, "Could not allocate stream\n");
462 return FALSE;
463 }
464
465 ost->st->codec = ost->enc = c;
466 ost->st->id = oc->nb_streams - 1;
467
468 /* Some formats want stream headers to be separate. */
469 if (!stream_encode && oc->oformat->flags & AVFMT_GLOBALHEADER)
470 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
471
472 return TRUE;
473 }
474
475
init_audio(int sample_rate,int nchans,int argc,char ** argv)476 boolean init_audio(int sample_rate, int nchans, int argc, char **argv) {
477 // must be called before init_screen()
478 // gets the same argc, argv as init_screen() [created from get_init_rfx() template]
479 in_sample_rate = sample_rate;
480 in_nchans = nchans;
481 return TRUE;
482 }
483
484
// Host entry point: set up the whole output pipeline - container, video
// stream/encoder, optional audio stream/encoder (when init_audio() was
// called first), output file / UDP target, and the reusable video frame.
// argc / argv carry the values from the get_init_rfx() template.
// Returns FALSE on any failure (everything allocated so far is freed).
//
// Fix: with argc == 1 the original appended the extension with a second
// snprintf at offset strlen(argv[0]) but an unadjusted PATH_MAX bound,
// which could write past the end of uri; a single bounded snprintf is used
// instead.
boolean init_screen(int width, int height, boolean fullscreen, uint64_t window_id, int argc, char **argv) {
  AVCodec *codec, *acodec;

  const char *fmtstring;

  AVDictionary *fmt_opts = NULL;
  char uri[PATH_MAX];

  int vcodec_id;
  int acodec_id;
  int ret;

  uri[0] = 0;

  fprintf(stderr, "init_screen %d x %d %d\n", width, height, argc);

  ostv.frame = osta.frame = NULL;
  vStream = aStream = NULL;

  ostv.sws_ctx = NULL;
  osta.swr_ctx = NULL;

  if (mypalette == WEED_PALETTE_END) {
    fprintf(stderr, "libav stream plugin error: No palette was set !\n");
    return FALSE;
  }

  // defaults, used when few / no args are supplied
  if (intent == LIVES_INTENTION_STREAM)
    fmtstring = "flv";
  else
    fmtstring = "mp4";
  vcodec_id = AV_CODEC_ID_H264;
  acodec_id = AV_CODEC_ID_MP3;
  maxvbitrate = 500000;

  if (argc == 1) {
    // single argument: an output filename stem - append the container
    // extension in one bounded write
    snprintf(uri, PATH_MAX, "%s.%s", argv[0], fmtstring);
    argc = 0;
  }

  if (argc > 0) {
    // argv[0] = format index from the RFX string_list
    switch (atoi(argv[0])) {
    case 0:
      fmtstring = "mp4";
      vcodec_id = AV_CODEC_ID_H264;
      //acodec_id = AV_CODEC_ID_MP3;
      acodec_id = AV_CODEC_ID_AAC;
      break;
    case 1:
      fmtstring = "ogg";
      vcodec_id = AV_CODEC_ID_THEORA;
      acodec_id = AV_CODEC_ID_VORBIS;
      break;
    case 2:
      fmtstring = "webm";
      vcodec_id = AV_CODEC_ID_VP9;
      acodec_id = AV_CODEC_ID_OPUS;
      break;
    default:
      return FALSE;
    }

    maxvbitrate = atoi(argv[1]);

    switch (intent) {
    case LIVES_INTENTION_STREAM:
      // argv[5..8] = IP quads, argv[9] = port (see get_init_rfx())
      stream_encode = TRUE;
      snprintf(uri, PATH_MAX, "udp://%s.%s.%s.%s:%s", argv[5], argv[6], argv[7], argv[8], argv[9]);
      break;
    default:
      // argv[5] = output filename (transcode)
      stream_encode = FALSE;
      snprintf(uri, PATH_MAX, "%s", argv[5]);
      break;
    }
  }

  if (!*uri) {
    fprintf(stderr, "No output location set\n");
    return FALSE;
  }

  ret = avformat_alloc_output_context2(&fmtctx, NULL, fmtstring, uri);
  if (ret < 0) {
    fprintf(stderr, "Could not open fmt '%s': %s\n", fmtstring,
            av_err2str(ret));
  }

  if (!fmtctx) {
    printf("Could not deduce output format from file extension %s: using flv.\n", fmtstring);
    avformat_alloc_output_context2(&fmtctx, NULL, "flv", uri);
  }
  if (!fmtctx) return FALSE;

  // add the video stream
  if (!add_stream(&ostv, fmtctx, &codec, vcodec_id)) {
    avformat_free_context(fmtctx);
    fmtctx = NULL;
    return FALSE;
  }

  vStream = ostv.st;
  ostv.codec = codec;

  ostv.enc = encctx = vStream->codec;

#ifdef API_3_1
  // needs testing
  ret = avcodec_parameters_from_context(vStream->codecpar, encctx);
  if (ret < 0) {
    fprintf(stderr, "avcodec_decoder: avparms from context failed\n");
    return FALSE;
  }
#endif

  // override defaults: detect NTSC-style rates (e.g. 29.97 = 30000/1001)
  // and use a 1001-based timebase for them
  if (fabs(target_fps * 100100. - (double)((int)(target_fps + .5) * 100000)) < 1.) {
    vStream->time_base = (AVRational) {
      1001, (int)(target_fps + .5) * 1000
    };
  } else {
    vStream->time_base = (AVRational) {
      1000, (int)(target_fps + .5) * 1000
    };
  }

  vStream->codec->time_base = vStream->time_base;

  vStream->codec->width = width;
  vStream->codec->height = height;
  vStream->codec->pix_fmt = avpalette;

  // seems not to make a difference
  //vStream->codec->color_trc = AVCOL_TRC_IEC61966_2_1;

  vStream->codec->bit_rate = maxvbitrate;
  // vStream->codec->bit_rate_tolerance = 0;

  if (vcodec_id == AV_CODEC_ID_H264) {
    // x264 private options: fastest preset, near-lossless rate factor
    av_opt_set(encctx->priv_data, "preset", "ultrafast", 0);
    //av_opt_set(encctx->priv_data, "crf", "0", 0);
    av_opt_set(encctx->priv_data, "qscale", "1", 0);
    av_opt_set(encctx->priv_data, "profile", "main", 0);
    av_opt_set(encctx->priv_data, "crf", "1", 0);

    if (!argc || !atoi(argv[6])) {
      // "high quality" param (argv[6]) unset: lower q, about half the size
      vStream->codec->qmin = 10;
      vStream->codec->qmax = 51;
    }

    /* // highest quality - may break compliance */
    /* vStream->codec->me_subpel_quality = 11; */
    /* vStream->codec->trellis = 2; */

    /* // 3 for black enhance */
    /* av_opt_set(encctx->priv_data, "aq-mode", "2", 0); */

    /* if (mypalette == WEED_PALETTE_YUV444P) */
    /* av_opt_set(encctx->priv_data, "profile", "high444", 0); */
    /* else */
    /* av_opt_set(encctx->priv_data, "profile", "main", 0); */
  }

  //vStream->codec->gop_size = 10; // maybe only streaming, breaks whatsapp

  if (vcodec_id == AV_CODEC_ID_MPEG2VIDEO) {
    /* just for testing, we also add B frames */
    vStream->codec->max_b_frames = 2;
  }
  if (vcodec_id == AV_CODEC_ID_MPEG1VIDEO) {
    /* Needed to avoid using macroblocks in which some coeffs overflow.
       This does not happen with normal video, it just happens here as
       the motion of the chroma plane does not match the luma plane. */
    vStream->codec->mb_decision = 2;
  }

  fprintf(stderr, "init_screen2 %d x %d %d\n", width, height, argc);

  /* open video codec */
  if (avcodec_open2(encctx, codec, NULL) < 0) {
    fprintf(stderr, "Could not open codec\n");
    avformat_free_context(fmtctx);
    fmtctx = NULL;
    return FALSE;
  }

  // audio: only when the host called init_audio() first

  if (in_sample_rate > 0) {
    if (!add_stream(&osta, fmtctx, &acodec, acodec_id)) {
      avformat_free_context(fmtctx);
      fmtctx = NULL;
      return FALSE;
    }
    osta.codec = acodec;
    aStream = osta.st;
    osta.enc = aencctx = aStream->codec;

#ifdef API_3_1
    ret = avcodec_parameters_from_context(aStream->codecpar, aencctx);
    if (ret < 0) {
      fprintf(stderr, "avcodec_decoder: avparms from context failed\n");
      avformat_free_context(fmtctx);
      fmtctx = NULL;
      return FALSE;
    }
#endif

    // audio defaults, overridden from argv below
    out_nchans = 2;
    out_sample_rate = 44100;
    maxabitrate = 320000;

    if (argc > 0) {
      out_nchans = atoi(argv[2]) + 1;  // string_list index: 0=mono, 1=stereo
      switch (atoi(argv[3])) {
      case 0:
        out_sample_rate = 22050;
        break;
      case 1:
        out_sample_rate = 44100;
        break;
      case 2:
        out_sample_rate = 48000;
        break;
      default:
        break;
      }
      maxabitrate = atoi(argv[4]);
    }
    fprintf(stderr, "added audio stream\n");
    if (!open_audio()) {
      avformat_free_context(fmtctx);
      fmtctx = NULL;
      return FALSE;
    }
  }

  av_dump_format(fmtctx, 0, uri, 1);

  // container

  /* open output file */
  if (!(fmtctx->oformat->flags & AVFMT_NOFILE)) {
    fprintf(stderr, "opening file %s\n", uri);
    ret = avio_open(&fmtctx->pb, uri, AVIO_FLAG_WRITE);
    if (ret < 0) {
      fprintf(stderr, "Could not open '%s': %s\n", uri,
              av_err2str(ret));
      avformat_free_context(fmtctx);
      fmtctx = NULL;
      return FALSE;
    }

    av_dict_set(&fmt_opts, "movflags", "faststart", 0);
    av_dict_set(&fmt_opts, "movflags", "frag_keyframe", 0);
    ret = avformat_write_header(fmtctx, &fmt_opts);
    if (ret < 0) {
      fprintf(stderr, "Error occurred when writing header: %s\n",
              av_err2str(ret));
      avformat_free_context(fmtctx);
      fmtctx = NULL;
      return FALSE;
    }
  }

  /* create (container) libav video frame */
  ostv.frame = alloc_picture(avpalette, width, height);
  if (ostv.frame == NULL) {
    fprintf(stderr, "Could not allocate video frame\n");
    avformat_free_context(fmtctx);
    fmtctx = NULL;
    return FALSE;
  }

  ostv.next_pts = osta.next_pts = 0;
  return TRUE;
}
762
763
render_frame(int hsize,int vsize,int64_t tc,void ** pixel_data,void ** rd,void ** pp)764 boolean render_frame(int hsize, int vsize, int64_t tc, void **pixel_data, void **rd, void **pp) {
765 // call the function which was set in set_palette
766 return render_fn(hsize, vsize, pixel_data);
767 }
768
769 /*
770 static void log_packet(const AVPacket *pkt) {
771 AVRational *time_base = &fmtctx->streams[pkt->stream_index]->time_base;
772 printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
773 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
774 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
775 av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
776 pkt->stream_index);
777 }
778 */
779
780
// Rescale the packet's timestamps from the codec timebase to the stream
// timebase, then hand it to the muxer. The mutex serializes writes from
// the audio and video paths into the shared format context.
static int write_frame(const AVRational *time_base, AVStream *stream, AVPacket *pkt) {
  int ret;
  av_packet_rescale_ts(pkt, *time_base, stream->time_base);
  pkt->stream_index = stream->index;
  /* Write the compressed frame to the media file. */
  pthread_mutex_lock(&write_mutex);
  ret = av_interleaved_write_frame(fmtctx, pkt);
  pthread_mutex_unlock(&write_mutex);
  return ret;
}
793
794
// Copy tightly-packed planar YUV420 pixel data into pict, honouring the
// destination frame's per-plane linesizes.
static void copy_yuv_image(AVFrame *pict, int width, int height, const uint8_t *const *pixel_data) {
  int row;
  const int cwidth = width >> 1;
  const int cheight = height >> 1;

  /* when we pass a frame to the encoder, it may keep a reference to it
     internally; make sure we do not overwrite shared data here */
  if (av_frame_make_writable(pict) < 0) return;

  /* luma plane */
  for (row = 0; row < height; row++)
    memcpy(pict->data[0] + row * pict->linesize[0], pixel_data[0] + row * width, width);

  /* chroma planes - half resolution in both dimensions */
  for (row = 0; row < cheight; row++) {
    memcpy(pict->data[1] + row * pict->linesize[1], pixel_data[1] + row * cwidth, cwidth);
    memcpy(pict->data[2] + row * pict->linesize[2], pixel_data[2] + row * cwidth, cwidth);
  }
}
816
817
get_video_frame(const uint8_t * const * pixel_data,int hsize,int vsize)818 static AVFrame *get_video_frame(const uint8_t *const *pixel_data, int hsize, int vsize) {
819 AVCodecContext *c = ostv.enc;
820 static int istrides[3];
821
822 if (ostv.sws_ctx != NULL && (hsize != ohsize || vsize != ovsize)) {
823 sws_freeContext(ostv.sws_ctx);
824 ostv.sws_ctx = NULL;
825 }
826
827 if (hsize != c->width || vsize != c->height || mypalette != avpalette || ostv.sws_ctx == NULL) {
828 if (ostv.sws_ctx == NULL) {
829 ostv.sws_ctx = sws_getContext(hsize, vsize,
830 weed_palette_to_avi_pix_fmt(mypalette, &myclamp),
831 c->width, c->height,
832 avpalette,
833 SCALE_FLAGS, NULL, NULL, NULL);
834 if (ostv.sws_ctx == NULL) {
835 fprintf(stderr,
836 "libav_stream: Could not initialize the conversion context\n");
837 return NULL;
838 }
839 ohsize = hsize;
840 ovsize = vsize;
841 if (mypalette == WEED_PALETTE_YUV420P) {
842 istrides[0] = hsize;
843 istrides[1] = istrides[2] = hsize >> 1;
844 } else {
845 istrides[0] = hsize * 3;
846 }
847 }
848 sws_scale(ostv.sws_ctx,
849 (const uint8_t *const *)pixel_data, istrides,
850 0, vsize, ostv.frame->data, ostv.frame->linesize);
851 } else {
852 copy_yuv_image(ostv.frame, hsize, vsize, pixel_data);
853 }
854
855 ostv.frame->pts = ostv.next_pts++;
856 return ostv.frame;
857 }
858
859
// Host entry point: resample and encode a block of planar float audio and
// write the resulting packet(s). Calling with audio == NULL (or
// nsamps == 0) flushes one pending packet from the encoder.
// When the codec needs fixed-size frames (out_nb_samples != 0), samples
// left over after filling whole frames are carried across calls in
// spill_buffers (spb_len samples per channel).
boolean render_audio_frame_float(float **audio, int nsamps) {
  AVCodecContext *c = osta.enc;
  AVPacket pkt = { 0 }; // data and size must be 0;

  // per-channel cursors into the caller's planes
  float *abuff[in_nchans];

  int ret;
  int got_packet;
  int nb_samples;
  int i;

  av_init_packet(&pkt);

  if (audio == NULL || nsamps == 0) {
    // flush buffers: drain one delayed packet from the encoder
    ret = avcodec_encode_audio2(c, &pkt, NULL, &got_packet);
    if (ret < 0) {
      fprintf(stderr, "Error 1 encoding audio frame: %s %d %d %d %ld\n", av_err2str(ret), nsamps, c->sample_rate, c->sample_fmt,
              c->channel_layout);
      return FALSE;
    }

    if (got_packet) {
      ret = write_frame(&c->time_base, aStream, &pkt);
      if (ret < 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        return FALSE;
      }
    }
    return TRUE;
  }

  for (i = 0; i < in_nchans; i++) {
    abuff[i] = audio[i];
  }

  // consume the input in encoder-frame-sized chunks
  while (nsamps > 0) {
    if (out_nb_samples != 0) {
      if (nsamps + spb_len < in_nb_samples) {
        // have l.t. one full buffer to send, store this for next time
        for (i = 0; i < in_nchans; i++) {
          memcpy(&(spill_buffers[i][spb_len]), abuff[i], nsamps * sizeof(float));
        }
        spb_len += nsamps;
        return TRUE;
      }
      if (spb_len > 0) {
        // have data in buffers from last call. fill these up and clear them first
        for (i = 0; i < in_nchans; i++) {
          memcpy(&(spill_buffers[i][spb_len]), audio[i], (in_nb_samples - spb_len) * sizeof(float));
        }
      }
      nb_samples = out_nb_samples;
    } else {
      // codec accepts variable nb_samples, so encode all
      in_nb_samples = nsamps;
      nb_samples = av_rescale_rnd(in_nb_samples,
                                  c->sample_rate, in_sample_rate, AV_ROUND_DOWN);
      // a fresh frame is allocated per call in this mode (unreffed below)
      osta.frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
    }

    ret = av_frame_make_writable(osta.frame);
    if (ret < 0) return FALSE;

    // resample either directly from the caller's planes or, when carrying
    // over, from the spill buffers that were just topped up
    ret = swr_convert(osta.swr_ctx,
                      osta.frame->data, nb_samples,
                      spb_len == 0 ? (const uint8_t **)abuff : (const uint8_t **)spill_buffers, in_nb_samples);
    if (ret < 0) {
      fprintf(stderr, "Error while converting audio\n");
      return FALSE;
    }

    // pts derives from the running sample count, in the codec timebase
    osta.frame->pts = av_rescale_q(osta.samples_count, (AVRational) {
      1, c->sample_rate
    }, c->time_base);

    osta.samples_count += nb_samples;

    ret = avcodec_encode_audio2(c, &pkt, osta.frame, &got_packet);
    if (ret < 0) {
      fprintf(stderr, "Error 2 encoding audio frame: %s %d %d %d %d %ld\n", av_err2str(ret), nsamps,
              nb_samples, c->sample_rate, c->sample_fmt,
              c->channel_layout);
      return FALSE;
    }

    if (got_packet) {
      ret = write_frame(&c->time_base, aStream, &pkt);
      if (ret < 0) {
        fprintf(stderr, "Error 2 while writing audio frame: %s\n",
                av_err2str(ret));
        return FALSE;
      }
    }

    // advance past the input consumed this iteration (spilled samples
    // came from the previous call, so only in_nb_samples - spb_len new
    // samples were used)
    for (i = 0; i < in_nchans; i++) {
      abuff[i] += in_nb_samples - spb_len;
    }

    nsamps -= in_nb_samples - spb_len;
    spb_len = 0;

    if (out_nb_samples == 0) {
      // variable frame size: everything was encoded in one pass
      if (osta.frame != NULL) av_frame_unref(osta.frame);
      osta.frame = NULL;
      in_nb_samples = 0;
      return TRUE;
    }
  }
  return TRUE;
}
972
973
render_frame_yuv420(int hsize,int vsize,void ** pixel_data)974 boolean render_frame_yuv420(int hsize, int vsize, void **pixel_data) {
975 AVCodecContext *c;
976 AVPacket pkt = { 0 };
977
978 int got_packet = 0;
979 int ret;
980
981 c = ostv.enc;
982
983 // copy and scale pixel_data
984 if ((ostv.frame = get_video_frame((const uint8_t *const *)pixel_data, hsize, vsize)) != NULL) {
985 av_init_packet(&pkt);
986
987 /* encode the image */
988 ret = avcodec_encode_video2(c, &pkt, ostv.frame, &got_packet);
989
990 if (ret < 0) {
991 fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
992 return FALSE;
993 }
994 if (got_packet) {
995 ret = write_frame(&c->time_base, vStream, &pkt);
996 } else {
997 ret = 0;
998 }
999 if (ret < 0) {
1000 fprintf(stderr, "Error writing video frame: %s\n", av_err2str(ret));
1001 return FALSE;
1002 }
1003 }
1004
1005 return TRUE;
1006 }
1007
1008
render_frame_unknown(int hsize,int vsize,void ** pixel_data)1009 boolean render_frame_unknown(int hsize, int vsize, void **pixel_data) {
1010 if (mypalette == WEED_PALETTE_END) {
1011 fprintf(stderr, "libav_stream plugin error: No palette was set !\n");
1012 }
1013 return FALSE;
1014 }
1015
1016
// Host entry point: tear down the output pipeline.
// For file output: drain the delayed audio and video packets from the
// encoders, write the container trailer, close the file, then free the
// codec/format contexts, scaler/resampler and spill buffers.
void exit_screen(int16_t mouse_x, int16_t mouse_y) {
  AVCodecContext *c;
  AVPacket pkt = { 0 };

  int got_packet = 0;
  int ret;

  int i;

  if (fmtctx != NULL) {
    // only file output gets a final encoder flush (not network streams)
    if (!stream_encode && !(fmtctx->oformat->flags & AVFMT_NOFILE)) {

      if (in_sample_rate != 0) {
        // flush final audio
        c = osta.enc;

        // passing a NULL frame drains one buffered packet per call
        do {
          av_init_packet(&pkt);

          ret = avcodec_encode_audio2(c, &pkt, NULL, &got_packet);
          if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame: %s %d %d %d %d %ld\n", av_err2str(ret), 0, 0, c->sample_rate, c->sample_fmt,
                    c->channel_layout);
            break;
          }

          if (got_packet) {
            ret = write_frame(&c->time_base, aStream, &pkt);
            if (ret < 0) {
              fprintf(stderr, "Error while writing audio frame: %s\n",
                      av_err2str(ret));
              break;
            }
          }
        } while (got_packet);
      }

      // flush final few frames
      c = ostv.enc;

      do {
        av_init_packet(&pkt);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_packet);

        if (ret < 0) {
          fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
          break;
        }
        if (got_packet) {
          ret = write_frame(&c->time_base, vStream, &pkt);
        } else {
          ret = 0;
        }
        if (ret < 0) {
          break;
        }
      } while (got_packet);
    }

    if (!(fmtctx->oformat->flags & AVFMT_NOFILE))
      /* Write the trailer, if any. The trailer must be written before you
         close the CodecContexts open when you wrote the header; otherwise
         av_write_trailer() may try to use memory that was freed on
         av_codec_close(). */
      av_write_trailer(fmtctx);

    /* Close the output file. */
    avio_closep(&fmtctx->pb);
  }


  if (vStream != NULL) {
    avcodec_close(vStream->codec);
    vStream = NULL;
  }

  if (aStream != NULL) {
    avcodec_close(aStream->codec);
    aStream = NULL;
  }

  if (fmtctx != NULL) {
    avformat_free_context(fmtctx);
    fmtctx = NULL;
  }

  if (ostv.frame != NULL) av_frame_unref(ostv.frame);
  if (osta.frame != NULL) av_frame_unref(osta.frame);

  if (ostv.sws_ctx != NULL) sws_freeContext(ostv.sws_ctx);
  if (osta.swr_ctx != NULL) swr_free(&(osta.swr_ctx));

  ostv.sws_ctx = NULL;
  osta.swr_ctx = NULL;

  // release the audio carry-over buffers allocated in open_audio()
  if (spill_buffers != NULL) {
    for (i = 0; i < in_nchans; i++) {
      free(spill_buffers[i]);
    }
    free(spill_buffers);
    spill_buffers = NULL;
  }

  // require a fresh init_audio() before the next init_screen()
  in_sample_rate = 0;
}
1123
1124
module_unload(void)1125 void module_unload(void) {
1126 if (inited)
1127 avformat_network_deinit();
1128 pthread_mutex_destroy(&write_mutex);
1129 inited = 0;
1130 }
1131
1132