1 /*
2 * generic decoding-related code
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <stdint.h>
22 #include <string.h>
23
24 #include "config.h"
25
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwaccel.h"
45 #include "internal.h"
46 #include "thread.h"
47
/**
 * Apply AV_PKT_DATA_PARAM_CHANGE side data from a packet to the codec context.
 *
 * The side data layout is a little-endian 32-bit flags word followed by the
 * values selected by the flags: channel count (le32), channel layout (le64),
 * sample rate (le32), dimensions (2x le32), in that order.
 *
 * @return 0 on success or when the error is tolerated (AV_EF_EXPLODE not
 *         set); a negative AVERROR code otherwise.
 */
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{
    int size = 0, ret;
    const uint8_t *data;
    uint32_t flags;
    int64_t val;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!data)
        return 0;

    if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
        av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
               "changes, but PARAM_CHANGE side data was sent to it.\n");
        ret = AVERROR(EINVAL);
        goto fail2;
    }

    if (size < 4)
        goto fail;

    flags = bytestream_get_le32(&data);
    size -= 4;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
        if (size < 4)
            goto fail;
        val = bytestream_get_le32(&data);
        /* reject zero/negative or out-of-int-range channel counts */
        if (val <= 0 || val > INT_MAX) {
            av_log(avctx, AV_LOG_ERROR, "Invalid channel count\n");
            ret = AVERROR_INVALIDDATA;
            goto fail2;
        }
        avctx->channels = val;
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
        if (size < 8)
            goto fail;
        avctx->channel_layout = bytestream_get_le64(&data);
        size -= 8;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        if (size < 4)
            goto fail;
        val = bytestream_get_le32(&data);
        if (val <= 0 || val > INT_MAX) {
            av_log(avctx, AV_LOG_ERROR, "Invalid sample rate\n");
            ret = AVERROR_INVALIDDATA;
            goto fail2;
        }
        avctx->sample_rate = val;
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            goto fail;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        size -= 8;
        /* ff_set_dimensions() validates the new dimensions */
        ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto fail2;
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
    ret = AVERROR_INVALIDDATA;
fail2:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }
    return 0;
}
125
/*
 * Snapshot the properties of pkt into avci->last_pkt_props so that
 * ff_decode_frame_props() can later attach them to output frames.
 *
 * Returns 0 on success or a negative AVERROR code from
 * av_packet_copy_props().
 */
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
{
    int err;

    av_packet_unref(avci->last_pkt_props);
    if (!pkt)
        return 0;

    err = av_packet_copy_props(avci->last_pkt_props, pkt);
    if (err < 0)
        return err;

    // HACK: ff_decode_frame_props() needs the original packet size.
    avci->last_pkt_props->size = pkt->size;
    return 0;
}
138
/*
 * Compatibility helper for avctx->refcounted_frames == 0: strip the
 * AVBufferRefs from frame, leaving the caller a non-refcounted view of the
 * data. The refcounted original is parked in avci->to_free, which keeps the
 * underlying buffers alive until the next decode call.
 *
 * Returns 0 on success or a negative AVERROR code; frame is unreferenced
 * on failure.
 */
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
{
    int ret;

    /* move the original frame to our backup */
    av_frame_unref(avci->to_free);
    av_frame_move_ref(avci->to_free, frame);

    /* now copy everything except the AVBufferRefs back
     * note that we make a COPY of the side data, so calling av_frame_free() on
     * the caller's frame will work properly */
    ret = av_frame_copy_props(frame, avci->to_free);
    if (ret < 0)
        return ret;

    memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
    memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
    if (avci->to_free->extended_data != avci->to_free->data) {
        /* audio with more planes than fit in frame->data: rebuild a plain
         * malloc'ed extended_data array pointing at the same planes */
        int planes = avci->to_free->channels;
        int size = planes * sizeof(*frame->extended_data);

        if (!size) {
            av_frame_unref(frame);
            return AVERROR_BUG;
        }

        frame->extended_data = av_malloc(size);
        if (!frame->extended_data) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        memcpy(frame->extended_data, avci->to_free->extended_data,
               size);
    } else
        frame->extended_data = frame->data;

    /* scalar fields describing the frame layout */
    frame->format = avci->to_free->format;
    frame->width = avci->to_free->width;
    frame->height = avci->to_free->height;
    frame->channel_layout = avci->to_free->channel_layout;
    frame->nb_samples = avci->to_free->nb_samples;
    frame->channels = avci->to_free->channels;

    return 0;
}
184
/**
 * Build and initialize the chain of bitstream filters requested by the
 * decoder (avctx->codec->bsfs); the "null" filter is used when the decoder
 * declares none. The list is a comma-separated string, each entry optionally
 * carrying options after '=' in key=value:key=value form.
 *
 * Idempotent: returns immediately if a chain already exists. On failure the
 * partially constructed chain is torn down via ff_decode_bsfs_uninit().
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_decode_bsfs_init(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeFilterContext *s = &avci->filter;
    const char *bsfs_str;
    int ret;

    if (s->nb_bsfs)
        return 0;

    bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
    while (bsfs_str && *bsfs_str) {
        AVBSFContext **tmp;
        const AVBitStreamFilter *filter;
        char *bsf, *bsf_options_str, *bsf_name;

        bsf = av_get_token(&bsfs_str, ",");
        if (!bsf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        bsf_name = av_strtok(bsf, "=", &bsf_options_str);
        if (!bsf_name) {
            av_freep(&bsf);
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        filter = av_bsf_get_by_name(bsf_name);
        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
                   "requested by a decoder. This is a bug, please report it.\n",
                   bsf_name);
            av_freep(&bsf);
            ret = AVERROR_BUG;
            goto fail;
        }

        tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
        if (!tmp) {
            av_freep(&bsf);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->bsfs = tmp;
        /* NULL-init the new slot before anything else can fail: av_realloc_array()
         * leaves the grown memory uninitialized, and if av_bsf_alloc() errors out
         * below, ff_decode_bsfs_uninit() would otherwise call av_bsf_free() on a
         * garbage pointer. */
        s->bsfs[s->nb_bsfs] = NULL;
        s->nb_bsfs++;

        ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0) {
            av_freep(&bsf);
            goto fail;
        }

        if (s->nb_bsfs == 1) {
            /* We do not currently have an API for passing the input timebase into decoders,
             * but no filters used here should actually need it.
             * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
            s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
            ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
                                                  avctx);
        } else {
            /* chain: each filter's input is the previous filter's output */
            s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
            ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
                                          s->bsfs[s->nb_bsfs - 2]->par_out);
        }
        if (ret < 0) {
            av_freep(&bsf);
            goto fail;
        }

        if (bsf_options_str && filter->priv_class) {
            /* first option may be given in shorthand form (value only) */
            const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL);
            const char * shorthand[2] = {NULL};

            if (opt)
                shorthand[0] = opt->name;

            ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":");
            if (ret < 0) {
                if (ret != AVERROR(ENOMEM)) {
                    av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s "
                           "requested by the decoder. This is a bug, please report it.\n",
                           bsf_name);
                    ret = AVERROR_BUG;
                }
                av_freep(&bsf);
                goto fail;
            }
        }
        av_freep(&bsf);

        ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0)
            goto fail;

        /* skip the separator before the next list entry */
        if (*bsfs_str)
            bsfs_str++;
    }

    return 0;
fail:
    ff_decode_bsfs_uninit(avctx);
    return ret;
}
289
/*
 * Try to get one output packet from the bitstream filter chain.
 *
 * Starts at the last (output) filter; EAGAIN from a filter means it needs
 * more input, so we move one filter up the chain. A packet or EOF produced
 * by an intermediate filter is pushed into the next filter down, and that
 * filter is polled again.
 *
 * Returns 0 with a packet in pkt, AVERROR_EOF at end of stream,
 * AVERROR(EAGAIN) when the whole chain needs more input, or another
 * negative AVERROR code on failure.
 */
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
{
    DecodeFilterContext *s = &avctx->internal->filter;
    int idx, ret;

    /* start with the last filter in the chain */
    idx = s->nb_bsfs - 1;
    while (idx >= 0) {
        /* request a packet from the currently selected filter */
        ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
        if (ret == AVERROR(EAGAIN)) {
            /* no packets available, try the next filter up the chain */
            ret = 0;
            idx--;
            continue;
        } else if (ret < 0 && ret != AVERROR_EOF) {
            return ret;
        }

        /* got a packet or EOF -- pass it to the caller or to the next filter
         * down the chain */
        if (idx == s->nb_bsfs - 1) {
            return ret;
        } else {
            idx++;
            /* EOF is propagated by sending a NULL (drain) packet */
            ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR,
                       "Error pre-processing a packet before decoding\n");
                av_packet_unref(pkt);
                return ret;
            }
        }
    }

    /* the first filter in the chain needs more input */
    return AVERROR(EAGAIN);
}
328
/*
 * Fetch the next filtered packet for a decoder: polls the bitstream filter
 * chain, records the packet properties and applies any parameter-change
 * side data. Returns 0 on success, AVERROR_EOF once draining has started,
 * AVERROR(EAGAIN) when more input is needed, or another negative error.
 */
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int err;

    if (avci->draining)
        return AVERROR_EOF;

    err = bsfs_poll(avctx, pkt);
    if (err == AVERROR_EOF)
        avci->draining = 1;
    if (err < 0)
        return err;

    err = extract_packet_props(avci, pkt);
    if (err >= 0)
        err = apply_param_change(avctx, pkt);
    if (err < 0) {
        av_packet_unref(pkt);
        return err;
    }

    /* decoders with their own receive_frame() consume whole packets */
    if (avctx->codec->receive_frame)
        avci->compat_decode_consumed += pkt->size;

    return 0;
}
359
360 /**
361 * Attempt to guess proper monotonic timestamps for decoded video frames
362 * which might have incorrect times. Input timestamps may wrap around, in
363 * which case the output will as well.
364 *
365 * @param pts the pts field of the decoded AVPacket, as passed through
366 * AVFrame.pts
367 * @param dts the dts field of the decoded AVPacket
368 * @return one of the input values, may be AV_NOPTS_VALUE
369 */
guess_correct_pts(AVCodecContext * ctx,int64_t reordered_pts,int64_t dts)370 static int64_t guess_correct_pts(AVCodecContext *ctx,
371 int64_t reordered_pts, int64_t dts)
372 {
373 int64_t pts = AV_NOPTS_VALUE;
374
375 if (dts != AV_NOPTS_VALUE) {
376 ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
377 ctx->pts_correction_last_dts = dts;
378 } else if (reordered_pts != AV_NOPTS_VALUE)
379 ctx->pts_correction_last_dts = reordered_pts;
380
381 if (reordered_pts != AV_NOPTS_VALUE) {
382 ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
383 ctx->pts_correction_last_pts = reordered_pts;
384 } else if(dts != AV_NOPTS_VALUE)
385 ctx->pts_correction_last_pts = dts;
386
387 if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
388 && reordered_pts != AV_NOPTS_VALUE)
389 pts = reordered_pts;
390 else
391 pts = dts;
392
393 return pts;
394 }
395
/*
 * The core of the receive_frame wrapper for the decoders implementing
 * the simple (->decode() callback) API. Certain decoders might consume
 * partial packets without returning any output, so this function needs
 * to be called in a loop until it produces a frame or fails.
 */
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeSimpleContext *ds = &avci->ds;
    AVPacket *pkt = ds->in_pkt;
    // copy to ensure we do not change pkt
    int got_frame, actual_got_frame;
    int ret;

    /* fetch a new input packet once the previous one is fully consumed */
    if (!pkt->data && !avci->draining) {
        av_packet_unref(pkt);
        ret = ff_decode_get_packet(avctx, pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    // Some codecs (at least wma lossless) will crash when feeding drain packets
    // after EOF was signaled.
    if (avci->draining_done)
        return AVERROR_EOF;

    /* no data and no drain support: nothing left to output */
    if (!pkt->data &&
        !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
          avctx->active_thread_type & FF_THREAD_FRAME))
        return AVERROR_EOF;

    got_frame = 0;

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
        ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
    } else {
        ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);

        if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
            frame->pkt_dts = pkt->dts;
        if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
            if(!avctx->has_b_frames)
                frame->pkt_pos = pkt->pos;
            //FIXME these should be under if(!avctx->has_b_frames)
            /* get_buffer is supposed to set frame parameters */
            if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
                if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
                if (!frame->width) frame->width = avctx->width;
                if (!frame->height) frame->height = avctx->height;
                if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
            }
        }
    }
    emms_c();
    /* remember whether the decoder produced anything, before the discard /
     * skip-samples logic below may clear got_frame */
    actual_got_frame = got_frame;

    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if (frame->flags & AV_FRAME_FLAG_DISCARD)
            got_frame = 0;
        if (got_frame)
            frame->best_effort_timestamp = guess_correct_pts(avctx,
                                                             frame->pts,
                                                             frame->pkt_dts);
    } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        uint8_t *side;
        int side_size;
        uint32_t discard_padding = 0;
        uint8_t skip_reason = 0;
        uint8_t discard_reason = 0;

        if (ret >= 0 && got_frame) {
            frame->best_effort_timestamp = guess_correct_pts(avctx,
                                                             frame->pts,
                                                             frame->pkt_dts);
            /* fill in format/channel info the decoder did not set */
            if (frame->format == AV_SAMPLE_FMT_NONE)
                frame->format = avctx->sample_fmt;
            if (!frame->channel_layout)
                frame->channel_layout = avctx->channel_layout;
            if (!frame->channels)
                frame->channels = avctx->channels;
            if (!frame->sample_rate)
                frame->sample_rate = avctx->sample_rate;
        }

        /* AV_PKT_DATA_SKIP_SAMPLES: le32 skip count, le32 discard count,
         * one byte skip reason, one byte discard reason */
        side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
        if(side && side_size>=10) {
            avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
            discard_padding = AV_RL32(side + 4);
            av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
                   avctx->internal->skip_samples, (int)discard_padding);
            skip_reason = AV_RL8(side + 8);
            discard_reason = AV_RL8(side + 9);
        }

        /* a discarded frame still counts against the samples to skip */
        if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
            got_frame = 0;
        }

        if (avctx->internal->skip_samples > 0 && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            if(frame->nb_samples <= avctx->internal->skip_samples){
                /* the whole frame is skipped */
                got_frame = 0;
                avctx->internal->skip_samples -= frame->nb_samples;
                av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
                       avctx->internal->skip_samples);
            } else {
                /* drop the leading skip_samples samples and shift the rest
                 * of the frame forward, adjusting timestamps to match */
                av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
                                frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
                if(avctx->pkt_timebase.num && avctx->sample_rate) {
                    int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
                                                   (AVRational){1, avctx->sample_rate},
                                                   avctx->pkt_timebase);
                    if(frame->pts!=AV_NOPTS_VALUE)
                        frame->pts += diff_ts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
                    if(frame->pkt_pts!=AV_NOPTS_VALUE)
                        frame->pkt_pts += diff_ts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
                    if(frame->pkt_dts!=AV_NOPTS_VALUE)
                        frame->pkt_dts += diff_ts;
                    if (frame->pkt_duration >= diff_ts)
                        frame->pkt_duration -= diff_ts;
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
                }
                av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
                       avctx->internal->skip_samples, frame->nb_samples);
                frame->nb_samples -= avctx->internal->skip_samples;
                avctx->internal->skip_samples = 0;
            }
        }

        /* trim trailing padding samples off the end of the frame */
        if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            if (discard_padding == frame->nb_samples) {
                got_frame = 0;
            } else {
                if(avctx->pkt_timebase.num && avctx->sample_rate) {
                    int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
                                                   (AVRational){1, avctx->sample_rate},
                                                   avctx->pkt_timebase);
                    frame->pkt_duration = diff_ts;
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
                }
                av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
                       (int)discard_padding, frame->nb_samples);
                frame->nb_samples -= discard_padding;
            }
        }

        /* manual skip mode: export the skip info as frame side data instead
         * of applying it here */
        if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
            AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
            if (fside) {
                AV_WL32(fside->data, avctx->internal->skip_samples);
                AV_WL32(fside->data + 4, discard_padding);
                AV_WL8(fside->data + 8, skip_reason);
                AV_WL8(fside->data + 9, discard_reason);
                avctx->internal->skip_samples = 0;
            }
        }
    }

    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
        !avci->showed_multi_packet_warning &&
        ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
        av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
        avci->showed_multi_packet_warning = 1;
    }

    if (!got_frame)
        av_frame_unref(frame);

    /* video decoders are considered to consume the whole packet */
    if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
        ret = pkt->size;

#if FF_API_AVCTX_TIMEBASE
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
        avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
#endif

    /* do not stop draining when actual_got_frame != 0 or ret < 0 */
    /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
    if (avctx->internal->draining && !actual_got_frame) {
        if (ret < 0) {
            /* prevent infinite loop if a decoder wrongly always return error on draining */
            /* reasonable nb_errors_max = maximum b frames + thread count */
            int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
                                avctx->thread_count : 1);

            if (avci->nb_draining_errors++ >= nb_errors_max) {
                av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
                       "Stop draining and force EOF.\n");
                avci->draining_done = 1;
                ret = AVERROR_BUG;
            }
        } else {
            avci->draining_done = 1;
        }
    }

    avci->compat_decode_consumed += ret;

    /* advance past the consumed bytes; keep the remainder of the packet for
     * the next call, but invalidate its timestamps so they are used once */
    if (ret >= pkt->size || ret < 0) {
        av_packet_unref(pkt);
    } else {
        int consumed = ret;

        pkt->data                += consumed;
        pkt->size                -= consumed;
        avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
        pkt->pts                  = AV_NOPTS_VALUE;
        pkt->dts                  = AV_NOPTS_VALUE;
        avci->last_pkt_props->pts = AV_NOPTS_VALUE;
        avci->last_pkt_props->dts = AV_NOPTS_VALUE;
    }

    if (got_frame)
        av_assert0(frame->buf[0]);

    return ret < 0 ? ret : 0;
}
623
/*
 * Drive decode_simple_internal() until it either produces a frame
 * (frame->buf[0] becomes set) or reports an error.
 */
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    while (!frame->buf[0]) {
        int err = decode_simple_internal(avctx, frame);
        if (err < 0)
            return err;
    }

    return 0;
}
636
/*
 * Pull one decoded frame from the decoder, dispatching to either the
 * decoder's own receive_frame() callback or the simple-API wrapper, then
 * run any per-frame post-processing (e.g. hwaccel output transfer) stored
 * in the frame's private decode data.
 *
 * Returns 0 with a frame, or a negative AVERROR code (AVERROR(EAGAIN),
 * AVERROR_EOF, ...).
 */
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    /* the caller must hand in an empty frame */
    av_assert0(!frame->buf[0]);

    if (avctx->codec->receive_frame)
        ret = avctx->codec->receive_frame(avctx, frame);
    else
        ret = decode_simple_receive_frame(avctx, frame);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    if (!ret) {
        /* the only case where decode data is not set should be decoders
         * that do not call ff_get_buffer() */
        av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
                   !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));

        if (frame->private_ref) {
            FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;

            if (fdd->post_process) {
                ret = fdd->post_process(avctx, frame);
                if (ret < 0) {
                    av_frame_unref(frame);
                    return ret;
                }
            }
        }
    }

    /* free the per-frame decode data */
    av_buffer_unref(&frame->private_ref);

    return ret;
}
676
avcodec_send_packet(AVCodecContext * avctx,const AVPacket * avpkt)677 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
678 {
679 AVCodecInternal *avci = avctx->internal;
680 int ret;
681
682 if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
683 return AVERROR(EINVAL);
684
685 if (avctx->internal->draining)
686 return AVERROR_EOF;
687
688 if (avpkt && !avpkt->size && avpkt->data)
689 return AVERROR(EINVAL);
690
691 av_packet_unref(avci->buffer_pkt);
692 if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
693 ret = av_packet_ref(avci->buffer_pkt, avpkt);
694 if (ret < 0)
695 return ret;
696 }
697
698 ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
699 if (ret < 0) {
700 av_packet_unref(avci->buffer_pkt);
701 return ret;
702 }
703
704 if (!avci->buffer_frame->buf[0]) {
705 ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
706 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
707 return ret;
708 }
709
710 return 0;
711 }
712
/*
 * Validate and optionally apply the decoder-reported cropping of frame.
 * Bogus cropping (overflowing or larger than the frame) is logged, reset
 * to zero and otherwise ignored. When avctx->apply_cropping is unset the
 * (valid) cropping is left for the caller to apply.
 */
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
{
    /* make sure we are noisy about decoders returning invalid cropping data */
    int bogus = frame->crop_left >= INT_MAX - frame->crop_right    ||
                frame->crop_top  >= INT_MAX - frame->crop_bottom   ||
                (frame->crop_left + frame->crop_right)  >= frame->width ||
                (frame->crop_top  + frame->crop_bottom) >= frame->height;

    if (bogus) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid cropping information set by a decoder: "
               "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
               "(frame size %dx%d). This is a bug, please report it\n",
               frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
               frame->width, frame->height);
        frame->crop_left   = 0;
        frame->crop_right  = 0;
        frame->crop_top    = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    if (!avctx->apply_cropping)
        return 0;

    return av_frame_apply_cropping(frame, (avctx->flags & AV_CODEC_FLAG_UNALIGNED) ?
                                          AV_FRAME_CROP_UNALIGNED : 0);
}
739
/*
 * Public API: return the next decoded frame.
 *
 * Returns the frame buffered by avcodec_send_packet() if one exists,
 * otherwise pulls a new frame from the decoder. Video frames get their
 * cropping validated/applied. With AV_CODEC_FLAG_DROPCHANGED set, frames
 * whose basic parameters differ from the first decoded frame are dropped
 * and AVERROR_INPUT_CHANGED is returned.
 */
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret, changed;

    av_frame_unref(frame);

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->buffer_frame->buf[0]) {
        /* a frame was already decoded while buffering the last packet */
        av_frame_move_ref(frame, avci->buffer_frame);
    } else {
        ret = decode_receive_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = apply_cropping(avctx, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }
    }

    avctx->frame_number++;

    if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {

        /* remember the parameters of the first frame as the reference */
        if (avctx->frame_number == 1) {
            avci->initial_format = frame->format;
            switch(avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                avci->initial_width = frame->width;
                avci->initial_height = frame->height;
                break;
            case AVMEDIA_TYPE_AUDIO:
                avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
                                            avctx->sample_rate;
                avci->initial_channels = frame->channels;
                avci->initial_channel_layout = frame->channel_layout;
                break;
            }
        }

        /* compare every later frame against the recorded reference */
        if (avctx->frame_number > 1) {
            changed = avci->initial_format != frame->format;

            switch(avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                changed |= avci->initial_width != frame->width ||
                           avci->initial_height != frame->height;
                break;
            case AVMEDIA_TYPE_AUDIO:
                changed |= avci->initial_sample_rate != frame->sample_rate ||
                           avci->initial_sample_rate != avctx->sample_rate ||
                           avci->initial_channels != frame->channels ||
                           avci->initial_channel_layout != frame->channel_layout;
                break;
            }

            if (changed) {
                avci->changed_frames_dropped++;
                av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
                                            " drop count: %d \n",
                                            avctx->frame_number, frame->pts,
                                            avci->changed_frames_dropped);
                av_frame_unref(frame);
                return AVERROR_INPUT_CHANGED;
            }
        }
    }
    return 0;
}
815
/*
 * Implementation of the deprecated avcodec_decode_video2()/audio4() API on
 * top of the send/receive API.
 *
 * One packet is sent and all immediately available output is drained; when
 * the old API can only return a single frame per call, extra frames are
 * parked in avci->compat_decode_frame and a warning is printed.
 * compat_decode_partial_size tracks bytes of a packet not yet consumed so
 * the caller can re-feed the remainder.
 *
 * Returns the number of bytes consumed or a negative AVERROR code.
 */
static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
                         int *got_frame, const AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    av_assert0(avci->compat_decode_consumed == 0);

    if (avci->draining_done && pkt && pkt->size != 0) {
        av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
        avcodec_flush_buffers(avctx);
    }

    *got_frame = 0;
    avci->compat_decode = 1;

    /* a partially consumed packet must be re-fed unchanged */
    if (avci->compat_decode_partial_size > 0 &&
        avci->compat_decode_partial_size != pkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "Got unexpected packet size after a partial decode\n");
        ret = AVERROR(EINVAL);
        goto finish;
    }

    if (!avci->compat_decode_partial_size) {
        ret = avcodec_send_packet(avctx, pkt);
        if (ret == AVERROR_EOF)
            ret = 0;
        else if (ret == AVERROR(EAGAIN)) {
            /* we fully drain all the output in each decode call, so this should not
             * ever happen */
            ret = AVERROR_BUG;
            goto finish;
        } else if (ret < 0)
            goto finish;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret < 0) {
            /* EAGAIN/EOF simply mean no (more) output for this packet */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (frame != avci->compat_decode_frame) {
            /* first frame of this call: hand it to the caller */
            if (!avctx->refcounted_frames) {
                ret = unrefcount_frame(avci, frame);
                if (ret < 0)
                    goto finish;
            }

            *got_frame = 1;
            frame = avci->compat_decode_frame;
        } else {
            /* additional frames cannot be returned through the old API */
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
                       "API cannot return all the frames for this decoder. "
                       "Some frames will be dropped. Update your code to the "
                       "new decoding API to fix this.\n");
                avci->compat_decode_warned = 1;
            }
        }

        if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
            break;
    }

finish:
    if (ret == 0) {
        /* if there are any bsfs then assume full packet is always consumed */
        if (avctx->codec->bsfs)
            ret = pkt->size;
        else
            ret = FFMIN(avci->compat_decode_consumed, pkt->size);
    }
    avci->compat_decode_consumed = 0;
    avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;

    return ret;
}
897
/*
 * Deprecated video decoding entry point; thin wrapper around the
 * send/receive-based compat_decode() shim.
 * Returns bytes consumed or a negative AVERROR code.
 */
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              const AVPacket *avpkt)
{
    return compat_decode(avctx, picture, got_picture_ptr, avpkt);
}
904
/*
 * Deprecated audio decoding entry point; thin wrapper around the
 * send/receive-based compat_decode() shim.
 * Returns bytes consumed or a negative AVERROR code.
 */
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
                                              AVFrame *frame,
                                              int *got_frame_ptr,
                                              const AVPacket *avpkt)
{
    return compat_decode(avctx, frame, got_frame_ptr, avpkt);
}
912
/* Reset sub to a cleared state with an unset (AV_NOPTS_VALUE) pts. */
static void get_subtitle_defaults(AVSubtitle *sub)
{
    memset(sub, 0, sizeof(*sub));
    sub->pts = AV_NOPTS_VALUE;
}
918
#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
/*
 * Recode a subtitle packet from avctx->sub_charenc to UTF-8 when
 * pre-decoder recoding (FF_SUB_CHARENC_MODE_PRE_DECODER) is enabled;
 * otherwise a no-op. On success outpkt owns a freshly allocated buffer
 * holding the recoded, NUL-padded text.
 *
 * Returns 0 on success (or no-op), a negative AVERROR code on failure.
 * Without iconv support, requesting recoding fails with EINVAL.
 */
static int recode_subtitle(AVCodecContext *avctx,
                           AVPacket *outpkt, const AVPacket *inpkt)
{
#if CONFIG_ICONV
    iconv_t cd = (iconv_t)-1;
    int ret = 0;
    char *inb, *outb;
    size_t inl, outl;
    AVPacket tmp;
#endif

    if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
        return 0;

#if CONFIG_ICONV
    /* the charenc was validated when the codec was opened, hence the assert */
    cd = iconv_open("UTF-8", avctx->sub_charenc);
    av_assert0(cd != (iconv_t)-1);

    inb = inpkt->data;
    inl = inpkt->size;

    if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* worst case: every input byte expands to UTF8_MAX_BYTES output bytes */
    ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
    if (ret < 0)
        goto end;
    outpkt->buf  = tmp.buf;
    outpkt->data = tmp.data;
    outpkt->size = tmp.size;
    outb = outpkt->data;
    outl = outpkt->size;

    if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
        iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
        outl >= outpkt->size || inl != 0) {
        ret = FFMIN(AVERROR(errno), -1);
        av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
               "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
        av_packet_unref(&tmp);
        goto end;
    }
    /* shrink to the actual output length and zero the unused tail */
    outpkt->size -= outl;
    memset(outpkt->data + outpkt->size, 0, outl);

end:
    if (cd != (iconv_t)-1)
        iconv_close(cd);
    return ret;
#else
    av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
    return AVERROR(EINVAL);
#endif
}
977
utf8_check(const uint8_t * str)978 static int utf8_check(const uint8_t *str)
979 {
980 const uint8_t *byte;
981 uint32_t codepoint, min;
982
983 while (*str) {
984 byte = str;
985 GET_UTF8(codepoint, *(byte++), return 0;);
986 min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
987 1 << (5 * (byte - str) - 4);
988 if (codepoint < min || codepoint >= 0x110000 ||
989 codepoint == 0xFFFE /* BOM */ ||
990 codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
991 return 0;
992 str = byte;
993 }
994 return 1;
995 }
996
997 #if FF_API_ASS_TIMING
/*
 * Append an ASS timestamp "H:MM:SS.CC," (ts in centiseconds) to buf;
 * ts == -1 emits the conventional "infinite" timestamp used for events
 * of unknown duration.
 */
static void insert_ts(AVBPrint *buf, int ts)
{
    int h, m, s, cs;

    if (ts == -1) {
        av_bprintf(buf, "9:59:59.99,");
        return;
    }

    h  = ts / 360000;
    m  = (ts % 360000) / 6000;
    s  = (ts % 6000) / 100;
    cs = ts % 100;
    av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, cs);
}
1011
/*
 * Rewrite each ASS rect of sub from the modern event form
 * "ReadOrder,Layer,..." into the old standalone-file form
 * "Dialogue: Layer,Start,End,...\r\n" with explicit timestamps derived
 * from the packet's pts/duration (rescaled to centiseconds).
 *
 * Returns 0 on success or AVERROR(ENOMEM).
 */
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
{
    int i;
    AVBPrint buf;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    for (i = 0; i < sub->num_rects; i++) {
        char *final_dialog;
        const char *dialog;
        AVSubtitleRect *rect = sub->rects[i];
        int ts_start, ts_duration = -1;
        long int layer;

        /* skip non-ASS rects and rects already in the old form */
        if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
            continue;

        av_bprint_clear(&buf);

        /* skip ReadOrder */
        dialog = strchr(rect->ass, ',');
        if (!dialog)
            continue;
        dialog++;

        /* extract Layer or Marked */
        layer = strtol(dialog, (char**)&dialog, 10);
        if (*dialog != ',')
            continue;
        dialog++;

        /* rescale timing to ASS time base (ms) */
        ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
        if (pkt->duration != -1)
            ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
        sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);

        /* construct ASS (standalone file form with timestamps) string */
        av_bprintf(&buf, "Dialogue: %ld,", layer);
        insert_ts(&buf, ts_start);
        insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
        av_bprintf(&buf, "%s\r\n", dialog);

        final_dialog = av_strdup(buf.str);
        if (!av_bprint_is_complete(&buf) || !final_dialog) {
            av_freep(&final_dialog);
            av_bprint_finalize(&buf, NULL);
            return AVERROR(ENOMEM);
        }
        /* replace the rect's event string with the rewritten one */
        av_freep(&rect->ass);
        rect->ass = final_dialog;
    }

    av_bprint_finalize(&buf, NULL);
    return 0;
}
1068 #endif
1069
/**
 * Decode a subtitle packet into an AVSubtitle.
 *
 * The packet text is optionally recoded to UTF-8 first (sub_charenc),
 * then handed to the decoder; on output, ASS rects may be converted to
 * the legacy timed form, end_display_time is derived from the packet
 * duration when missing, and text rects are validated as UTF-8.
 *
 * Fix: if extract_packet_props() failed after the packet had been
 * recoded, the recoded payload leaked; it is now released on that path.
 *
 * @return 0 or a negative AVERROR code
 */
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
                             int *got_sub_ptr,
                             AVPacket *avpkt)
{
    int i, ret = 0;

    if (!avpkt->data && avpkt->size) {
        av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
        return AVERROR(EINVAL);
    }
    if (!avctx->codec)
        return AVERROR(EINVAL);
    if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
        return AVERROR(EINVAL);
    }

    *got_sub_ptr = 0;
    get_subtitle_defaults(sub);

    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
        AVPacket pkt_recoded = *avpkt;

        ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
        if (ret < 0) {
            *got_sub_ptr = 0;
        } else {
            ret = extract_packet_props(avctx->internal, &pkt_recoded);
            if (ret < 0) {
                if (avpkt->data != pkt_recoded.data) {
                    /* side data pointers are shared with the caller's packet;
                     * detach them so only the recoded payload is freed */
                    pkt_recoded.side_data = NULL;
                    pkt_recoded.side_data_elems = 0;
                    av_packet_unref(&pkt_recoded);
                }
                return ret;
            }

            if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
                sub->pts = av_rescale_q(avpkt->pts,
                                        avctx->pkt_timebase, AV_TIME_BASE_Q);
            ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
            /* got_sub implies success, and rects imply got_sub */
            av_assert1((ret >= 0) >= !!*got_sub_ptr &&
                       !!*got_sub_ptr >= !!sub->num_rects);

#if FF_API_ASS_TIMING
            if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
                && *got_sub_ptr && sub->num_rects) {
                const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
                                                              : avctx->time_base;
                int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
                if (err < 0)
                    ret = err;
            }
#endif

            /* derive a display duration from the packet when the decoder
             * did not provide one */
            if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
                avctx->pkt_timebase.num) {
                AVRational ms = { 1, 1000 };
                sub->end_display_time = av_rescale_q(avpkt->duration,
                                                     avctx->pkt_timebase, ms);
            }

            if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
                sub->format = 0;
            else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
                sub->format = 1;

            for (i = 0; i < sub->num_rects; i++) {
                if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
                    sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Invalid UTF-8 in decoded subtitles text; "
                           "maybe missing -sub_charenc option\n");
                    avsubtitle_free(sub);
                    ret = AVERROR_INVALIDDATA;
                    break;
                }
            }

            if (avpkt->data != pkt_recoded.data) { // did we recode?
                /* prevent from destroying side data from original packet */
                pkt_recoded.side_data = NULL;
                pkt_recoded.side_data_elems = 0;

                av_packet_unref(&pkt_recoded);
            }
        }

        if (*got_sub_ptr)
            avctx->frame_number++;
    }

    return ret;
}
1158
avcodec_default_get_format(struct AVCodecContext * avctx,const enum AVPixelFormat * fmt)1159 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1160 const enum AVPixelFormat *fmt)
1161 {
1162 const AVPixFmtDescriptor *desc;
1163 const AVCodecHWConfig *config;
1164 int i, n;
1165
1166 // If a device was supplied when the codec was opened, assume that the
1167 // user wants to use it.
1168 if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1169 AVHWDeviceContext *device_ctx =
1170 (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1171 for (i = 0;; i++) {
1172 config = &avctx->codec->hw_configs[i]->public;
1173 if (!config)
1174 break;
1175 if (!(config->methods &
1176 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1177 continue;
1178 if (device_ctx->type != config->device_type)
1179 continue;
1180 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1181 if (config->pix_fmt == fmt[n])
1182 return fmt[n];
1183 }
1184 }
1185 }
1186 // No device or other setup, so we have to choose from things which
1187 // don't any other external information.
1188
1189 // If the last element of the list is a software format, choose it
1190 // (this should be best software format if any exist).
1191 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1192 desc = av_pix_fmt_desc_get(fmt[n - 1]);
1193 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1194 return fmt[n - 1];
1195
1196 // Finally, traverse the list in order and choose the first entry
1197 // with no external dependencies (if there is no hardware configuration
1198 // information available then this just picks the first entry).
1199 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1200 for (i = 0;; i++) {
1201 config = avcodec_get_hw_config(avctx->codec, i);
1202 if (!config)
1203 break;
1204 if (config->pix_fmt == fmt[n])
1205 break;
1206 }
1207 if (!config) {
1208 // No specific config available, so the decoder must be able
1209 // to handle this format without any additional setup.
1210 return fmt[n];
1211 }
1212 if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1213 // Usable with only internal setup.
1214 return fmt[n];
1215 }
1216 }
1217
1218 // Nothing is usable, give up.
1219 return AV_PIX_FMT_NONE;
1220 }
1221
/**
 * Create and initialise avctx->hw_frames_ctx for the current hwaccel
 * from the user-supplied hw_device_ctx, if one is not already set.
 *
 * @param dev_type device type the caller expects; mismatches are errors
 * @return 0 on success (or if a frames context already exists),
 *         AVERROR(ENOSYS) without a hwaccel, negative AVERROR otherwise
 */
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
                                enum AVHWDeviceType dev_type)
{
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *frames_ctx;
    int ret;

    if (!avctx->hwaccel)
        return AVERROR(ENOSYS);

    /* a caller-provided frames context takes precedence */
    if (avctx->hw_frames_ctx)
        return 0;
    if (!avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
               "required for hardware accelerated decoding.\n");
        return AVERROR(EINVAL);
    }

    device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
    if (device_ctx->type != dev_type) {
        av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
               "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
               av_hwdevice_get_type_name(device_ctx->type));
        return AVERROR(EINVAL);
    }

    ret = avcodec_get_hw_frames_parameters(avctx,
                                           avctx->hw_device_ctx,
                                           avctx->hwaccel->pix_fmt,
                                           &avctx->hw_frames_ctx);
    if (ret < 0)
        return ret;

    frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;


    if (frames_ctx->initial_pool_size) {
        // We guarantee 4 base work surfaces. The function above guarantees 1
        // (the absolute minimum), so add the missing count.
        frames_ctx->initial_pool_size += 3;
    }

    ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
    if (ret < 0) {
        av_buffer_unref(&avctx->hw_frames_ctx);
        return ret;
    }

    return 0;
}
1272
/**
 * Allocate a hardware frames context for the given device and pixel
 * format and fill its parameters via the matching hwaccel's
 * frame_params() callback; the context is NOT initialised.
 *
 * The initial pool size (when fixed) is grown by extra_hw_frames and by
 * one surface per frame thread.
 *
 * @param out_frames_ref set to the new frames context on success;
 *        ownership passes to the caller
 * @return 0 on success, AVERROR(ENOENT) if no matching config/hwaccel,
 *         negative AVERROR otherwise
 */
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
                                     AVBufferRef *device_ref,
                                     enum AVPixelFormat hw_pix_fmt,
                                     AVBufferRef **out_frames_ref)
{
    AVBufferRef *frames_ref = NULL;
    const AVCodecHWConfigInternal *hw_config;
    const AVHWAccel *hwa;
    int i, ret;

    /* find the codec hw config matching the requested pixel format */
    for (i = 0;; i++) {
        hw_config = avctx->codec->hw_configs[i];
        if (!hw_config)
            return AVERROR(ENOENT);
        if (hw_config->public.pix_fmt == hw_pix_fmt)
            break;
    }

    hwa = hw_config->hwaccel;
    if (!hwa || !hwa->frame_params)
        return AVERROR(ENOENT);

    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return AVERROR(ENOMEM);

    ret = hwa->frame_params(avctx, frames_ref);
    if (ret >= 0) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;

        if (frames_ctx->initial_pool_size) {
            // If the user has requested that extra output surfaces be
            // available then add them here.
            if (avctx->extra_hw_frames > 0)
                frames_ctx->initial_pool_size += avctx->extra_hw_frames;

            // If frame threading is enabled then an extra surface per thread
            // is also required.
            if (avctx->active_thread_type & FF_THREAD_FRAME)
                frames_ctx->initial_pool_size += avctx->thread_count;
        }

        *out_frames_ref = frames_ref;
    } else {
        av_buffer_unref(&frames_ref);
    }
    return ret;
}
1321
/**
 * Install and initialise the hwaccel from the given hw config on avctx.
 *
 * Experimental hwaccels are skipped unless strict_std_compliance allows
 * them. On any failure the context is left with no hwaccel attached.
 *
 * @return 0 on success, AVERROR_PATCHWELCOME for rejected experimental
 *         hwaccels, or the hwaccel's init() error code
 */
static int hwaccel_init(AVCodecContext *avctx,
                        const AVCodecHWConfigInternal *hw_config)
{
    const AVHWAccel *hwa = hw_config->hwaccel;
    int ret;

    if ((hwa->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
               hwa->name);
        return AVERROR_PATCHWELCOME;
    }

    if (hwa->priv_data_size) {
        avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size);
        if (!avctx->internal->hwaccel_priv_data)
            return AVERROR(ENOMEM);
    }

    avctx->hwaccel = hwa;

    if (!hwa->init)
        return 0;

    ret = hwa->init(avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
               "hwaccel initialisation returned error.\n",
               av_get_pix_fmt_name(hw_config->public.pix_fmt));
        /* roll back: free private data and detach the hwaccel */
        av_freep(&avctx->internal->hwaccel_priv_data);
        avctx->hwaccel = NULL;
        return ret;
    }

    return 0;
}
1358
/**
 * Tear down any active hwaccel: run its uninit() callback, free its
 * private data, detach it and drop the hardware frames context.
 */
static void hwaccel_uninit(AVCodecContext *avctx)
{
    const AVHWAccel *hwa = avctx->hwaccel;

    if (hwa && hwa->uninit)
        hwa->uninit(avctx);

    av_freep(&avctx->internal->hwaccel_priv_data);
    avctx->hwaccel = NULL;

    av_buffer_unref(&avctx->hw_frames_ctx);
}
1370
/**
 * Negotiate a pixel format with the user's get_format() callback.
 *
 * Repeatedly offers the (shrinking) candidate list to get_format(),
 * validates the user's choice against the codec's hardware configs and
 * any supplied device/frames context, and initialises the matching
 * hwaccel if one is required. Formats whose setup fails are removed
 * from the list and negotiation is retried.
 *
 * @return the chosen pixel format, or AV_PIX_FMT_NONE on failure
 */
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat *choices;
    enum AVPixelFormat ret, user_choice;
    const AVCodecHWConfigInternal *hw_config;
    const AVCodecHWConfig *config;
    int i, n, err;

    // Find end of list.
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
    // Must contain at least one entry.
    av_assert0(n >= 1);
    // If a software format is available, it must be the last entry.
    desc = av_pix_fmt_desc_get(fmt[n - 1]);
    if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        // No software format is available.
    } else {
        avctx->sw_pix_fmt = fmt[n - 1];
    }

    // Mutable working copy of the candidate list (terminator included).
    choices = av_malloc_array(n + 1, sizeof(*choices));
    if (!choices)
        return AV_PIX_FMT_NONE;

    memcpy(choices, fmt, (n + 1) * sizeof(*choices));

    for (;;) {
        // Remove the previous hwaccel, if there was one.
        hwaccel_uninit(avctx);

        user_choice = avctx->get_format(avctx, choices);
        if (user_choice == AV_PIX_FMT_NONE) {
            // Explicitly chose nothing, give up.
            ret = AV_PIX_FMT_NONE;
            break;
        }

        desc = av_pix_fmt_desc_get(user_choice);
        if (!desc) {
            av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
                   "get_format() callback.\n");
            ret = AV_PIX_FMT_NONE;
            break;
        }
        av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
               desc->name);

        // The choice must come from the offered list.
        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        if (i == n) {
            av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
                   "%s not in possible list.\n", desc->name);
            ret = AV_PIX_FMT_NONE;
            break;
        }

        // Look up the hardware config matching the chosen format, if any.
        if (avctx->codec->hw_configs) {
            for (i = 0;; i++) {
                hw_config = avctx->codec->hw_configs[i];
                if (!hw_config)
                    break;
                if (hw_config->public.pix_fmt == user_choice)
                    break;
            }
        } else {
            hw_config = NULL;
        }

        if (!hw_config) {
            // No config available, so no extra setup required.
            ret = user_choice;
            break;
        }
        config = &hw_config->public;

        // Validate the chosen format against whatever hardware context
        // the user supplied (frames context, device context, or none).
        if (config->methods &
            AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
            avctx->hw_frames_ctx) {
            const AVHWFramesContext *frames_ctx =
                (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            if (frames_ctx->format != user_choice) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the format of the provided frames "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
                   avctx->hw_device_ctx) {
            const AVHWDeviceContext *device_ctx =
                (AVHWDeviceContext*)avctx->hw_device_ctx->data;
            if (device_ctx->type != config->device_type) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the type of the provided device "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
            // Internal-only setup, no additional configuration.
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
            // Some ad-hoc configuration we can't see and can't check.
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                   "missing configuration.\n", desc->name);
            goto try_again;
        }
        if (hw_config->hwaccel) {
            av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
                   "initialisation.\n", desc->name);
            err = hwaccel_init(avctx, hw_config);
            if (err < 0)
                goto try_again;
        }
        ret = user_choice;
        break;

    try_again:
        // Setup for this format failed: drop it from the candidate list
        // and renegotiate with get_format().
        av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
               "get_format() without it.\n", desc->name);
        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        for (; i + 1 < n; i++)
            choices[i] = choices[i + 1];
        --n;
    }

    av_freep(&choices);
    return ret;
}
1507
/**
 * (Re)configure the internal buffer pool(s) to match the given frame's
 * geometry (video: format/width/height, audio: format/planes/channels/
 * samples). A no-op when the pool already matches.
 *
 * @return 0 on success, negative AVERROR on failure (pools are reset)
 */
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            if (ret < 0)
                return ret;
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        // compute per-plane sizes from the offsets a contiguous layout
        // would have (NULL base pointer, so data[] holds offsets)
        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return tmpsize;

        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
                                                     CONFIG_MEMORY_POISONING ?
                                                        NULL :
                                                        av_buffer_allocz);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        // audio uses a single pool; each plane draws one buffer from it
        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format = frame->format;
        pool->planes = planes;
        pool->channels = ch;
        pool->samples = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}
1609
/**
 * Allocate audio plane buffers for a frame from the internal pool
 * (the pool must already match the frame; see update_frame_pool()).
 *
 * Fix: the FF_DEBUG_BUFFERS log message lacked its trailing newline,
 * unlike its video_get_buffer() counterpart.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (frame is unreffed)
 */
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int planes = pool->planes;
    int i;

    frame->linesize[0] = pool->linesize[0];

    /* planar audio with more channels than AV_NUM_DATA_POINTERS needs the
     * extended_data/extended_buf arrays for the overflow planes */
    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
        frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
                                               sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
    } else {
        frame->extended_data = frame->data;
        av_assert0(frame->nb_extended_buf == 0);
    }

    /* all planes draw equally-sized buffers from pool 0 */
    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->buf[i])
            goto fail;
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->extended_buf[i])
            goto fail;
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p\n", frame);

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
1654
/**
 * Allocate video plane buffers for a picture from the internal pools
 * (the pools must already match the picture; see update_frame_pool()).
 *
 * Fix: the "data[*] != NULL" error path returned a bare -1; it now
 * returns AVERROR(EINVAL), consistent with every other error path in
 * this file (still negative, so existing "< 0" callers are unaffected).
 *
 * @return 0 on success, a negative AVERROR on failure (pic is unreffed)
 */
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    FramePool *pool = s->internal->pool;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
    int i;

    if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
        av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
        return AVERROR(EINVAL);
    }

    if (!desc) {
        av_log(s, AV_LOG_ERROR,
               "Unable to get pixel format descriptor for format %s\n",
               av_get_pix_fmt_name(pic->format));
        return AVERROR(EINVAL);
    }

    memset(pic->data, 0, sizeof(pic->data));
    pic->extended_data = pic->data;

    /* one buffer per plane, each from its own pool */
    for (i = 0; i < 4 && pool->pools[i]; i++) {
        pic->linesize[i] = pool->linesize[i];

        pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!pic->buf[i])
            goto fail;

        pic->data[i] = pic->buf[i]->data;
    }
    for (; i < AV_NUM_DATA_POINTERS; i++) {
        pic->data[i] = NULL;
        pic->linesize[i] = 0;
    }
    /* fill in a default palette for paletted / pseudo-paletted formats */
    if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
        ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
        avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);

    if (s->debug & FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);

    return 0;
fail:
    av_frame_unref(pic);
    return AVERROR(ENOMEM);
}
1701
/**
 * Default get_buffer2() implementation: allocate frame buffers either
 * from a hardware frames context (when present) or from the internal
 * software buffer pool.
 */
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int err;

    /* hardware path: surfaces come from the hw frames context */
    if (avctx->hw_frames_ctx) {
        err = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
        frame->width  = avctx->coded_width;
        frame->height = avctx->coded_height;
        return err;
    }

    err = update_frame_pool(avctx, frame);
    if (err < 0)
        return err;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return video_get_buffer(avctx, frame);
    case AVMEDIA_TYPE_AUDIO:
        return audio_get_buffer(avctx, frame);
    default:
        return -1;
    }
}
1725
/**
 * Copy packed string metadata from the packet's STRINGS_METADATA side
 * data into the frame's metadata dictionary.
 *
 * @return 0 on success or a negative AVERROR code
 */
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
{
    int sd_size;
    const uint8_t *sd = av_packet_get_side_data(avpkt,
                                                AV_PKT_DATA_STRINGS_METADATA,
                                                &sd_size);

    return av_packet_unpack_dictionary(sd, sd_size, &frame->metadata);
}
1737
/**
 * Fill a frame's properties before buffer allocation: timestamps and
 * side data from the last packet, colorimetry defaults from the codec
 * context, and media-type specific fields (format, SAR, sample rate,
 * channel layout).
 *
 * @return 0 on success, AVERROR(EINVAL)/AVERROR(ENOSYS) on inconsistent
 *         channel configuration, AVERROR(ENOMEM) on allocation failure
 */
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
    const AVPacket *pkt = avctx->internal->last_pkt_props;
    int i;
    /* packet side data types that are forwarded 1:1 to frame side data */
    static const struct {
        enum AVPacketSideDataType packet;
        enum AVFrameSideDataType frame;
    } sd[] = {
        { AV_PKT_DATA_REPLAYGAIN ,                AV_FRAME_DATA_REPLAYGAIN },
        { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
        { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
        { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
        { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
        { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
        { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
        { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
    };

    if (pkt) {
        frame->pts = pkt->pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = pkt->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pkt_pos      = pkt->pos;
        frame->pkt_duration = pkt->duration;
        frame->pkt_size     = pkt->size;

        for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
            int size;
            uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
            if (packet_sd) {
                AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
                                                                   sd[i].frame,
                                                                   size);
                if (!frame_sd)
                    return AVERROR(ENOMEM);

                memcpy(frame_sd->data, packet_sd, size);
            }
        }
        add_metadata_from_side_data(pkt, frame);

        if (pkt->flags & AV_PKT_FLAG_DISCARD) {
            frame->flags |= AV_FRAME_FLAG_DISCARD;
        } else {
            frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
        }
    }
    frame->reordered_opaque = avctx->reordered_opaque;

    /* colorimetry: keep frame values when set, otherwise inherit
     * from the codec context */
    if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
        frame->color_primaries = avctx->color_primaries;
    if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
        frame->color_trc = avctx->color_trc;
    if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
        frame->colorspace = avctx->colorspace;
    if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
        frame->color_range = avctx->color_range;
    if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
        frame->chroma_location = avctx->chroma_sample_location;

    switch (avctx->codec->type) {
    case AVMEDIA_TYPE_VIDEO:
        frame->format              = avctx->pix_fmt;
        if (!frame->sample_aspect_ratio.num)
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;

        if (frame->width && frame->height &&
            av_image_check_sar(frame->width, frame->height,
                               frame->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   frame->sample_aspect_ratio.num,
                   frame->sample_aspect_ratio.den);
            frame->sample_aspect_ratio = (AVRational){ 0, 1 };
        }

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!frame->sample_rate)
            frame->sample_rate    = avctx->sample_rate;
        if (frame->format < 0)
            frame->format         = avctx->sample_fmt;
        if (!frame->channel_layout) {
            if (avctx->channel_layout) {
                 if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
                     avctx->channels) {
                     av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
                            "configuration.\n");
                     return AVERROR(EINVAL);
                 }

                frame->channel_layout = avctx->channel_layout;
            } else {
                if (avctx->channels > FF_SANE_NB_CHANNELS) {
                    av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
                           avctx->channels);
                    return AVERROR(ENOSYS);
                }
            }
        }
        frame->channels = avctx->channels;
        break;
    }
    return 0;
}
1845
/**
 * Sanity-check a video frame returned by get_buffer2(): assert that all
 * required plane pointers are set and warn about (then clear) non-NULL
 * pointers in unused plane slots.
 */
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
{
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        int num_planes = av_pix_fmt_count_planes(frame->format);
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        int flags = desc ? desc->flags : 0;
        /* paletted and pseudo-paletted formats carry the palette in a
         * second plane that av_pix_fmt_count_planes() does not report */
        if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
            num_planes = 2;
        if ((flags & FF_PSEUDOPAL) && frame->data[1])
            num_planes = 2;
        for (i = 0; i < num_planes; i++) {
            av_assert0(frame->data[i]);
        }
        // For formats without data like hwaccel allow unused pointers to be non-NULL.
        for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
            if (frame->data[i])
                av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
            frame->data[i] = NULL;
        }
    }
}
1868
decode_data_free(void * opaque,uint8_t * data)1869 static void decode_data_free(void *opaque, uint8_t *data)
1870 {
1871 FrameDecodeData *fdd = (FrameDecodeData*)data;
1872
1873 if (fdd->post_process_opaque_free)
1874 fdd->post_process_opaque_free(fdd->post_process_opaque);
1875
1876 if (fdd->hwaccel_priv_free)
1877 fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1878
1879 av_freep(&fdd);
1880 }
1881
/**
 * Attach a zero-initialised FrameDecodeData to frame->private_ref,
 * freed automatically via decode_data_free() when the ref is released.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_attach_decode_data(AVFrame *frame)
{
    FrameDecodeData *fdd;
    AVBufferRef *buf;

    av_assert1(!frame->private_ref);
    av_buffer_unref(&frame->private_ref);

    fdd = av_mallocz(sizeof(*fdd));
    if (!fdd)
        return AVERROR(ENOMEM);

    buf = av_buffer_create((uint8_t *)fdd, sizeof(*fdd), decode_data_free,
                           NULL, AV_BUFFER_FLAG_READONLY);
    if (!buf) {
        av_freep(&fdd);
        return AVERROR(ENOMEM);
    }

    frame->private_ref = buf;
    return 0;
}
1905
/**
 * Core of ff_get_buffer(): validate dimensions, fill frame properties,
 * then allocate buffers via the hwaccel's alloc_frame() or the user's
 * get_buffer2() callback, and attach the per-frame decode data.
 *
 * Unless the decoder exports cropping, the frame dimensions set by the
 * decoder (coded size) are overridden back to the display size at the
 * end.
 *
 * @return 0 on success, negative AVERROR on failure (frame is unreffed)
 */
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    const AVHWAccel *hwaccel = avctx->hwaccel;
    int override_dimensions = 1;
    int ret;

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
            av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
            return AVERROR(EINVAL);
        }

        /* no caller-requested size: allocate at coded size (lowres-scaled) */
        if (frame->width <= 0 || frame->height <= 0) {
            frame->width  = FFMAX(avctx->width,  AV_CEIL_RSHIFT(avctx->coded_width,  avctx->lowres));
            frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
            override_dimensions = 0;
        }

        if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
            av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
            return AVERROR(EINVAL);
        }
    }
    ret = ff_decode_frame_props(avctx, frame);
    if (ret < 0)
        return ret;

    if (hwaccel) {
        if (hwaccel->alloc_frame) {
            ret = hwaccel->alloc_frame(avctx, frame);
            goto end;
        }
    } else
        avctx->sw_pix_fmt = avctx->pix_fmt;

    ret = avctx->get_buffer2(avctx, frame, flags);
    if (ret < 0)
        goto end;

    validate_avframe_allocation(avctx, frame);

    ret = ff_attach_decode_data(frame);
    if (ret < 0)
        goto end;

end:
    /* restore display dimensions unless the decoder exports cropping */
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
        !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

    if (ret < 0)
        av_frame_unref(frame);

    return ret;
}
1963
/**
 * Allocate frame buffers via get_buffer_internal(), logging and
 * resetting the frame dimensions on failure.
 */
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int err = get_buffer_internal(avctx, frame, flags);

    if (err >= 0)
        return err;

    av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
    frame->width  = 0;
    frame->height = 0;
    return err;
}
1973
/**
 * Core of ff_reget_buffer(): reuse the existing frame buffer when it is
 * writable and still matches the codec parameters; otherwise allocate a
 * new buffer and copy the old contents into it.
 *
 * Fix: the return value of av_frame_copy() was silently discarded; it
 * is now propagated (it should not fail here, since the new buffer was
 * just allocated with matching geometry, but ignoring a fallible call
 * hides errors).
 *
 * @return 0 on success or a negative AVERROR code
 */
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    /* drop a buffer whose geometry no longer matches the stream */
    if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
        av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
               frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
        av_frame_unref(frame);
    }

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    /* a writable buffer can be reused in place */
    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    /* otherwise allocate a fresh buffer and copy the old contents */
    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    ret = av_frame_copy(frame, tmp);
    av_frame_free(&tmp);
    if (ret < 0)
        return ret;

    return 0;
}
2010
/**
 * Reuse or reallocate a frame buffer via reget_buffer_internal(),
 * logging on failure.
 */
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    int err = reget_buffer_internal(avctx, frame);

    if (err < 0)
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
    return err;
}
2018
/* Flush every bitstream filter in the decoder's filter chain. */
static void bsfs_flush(AVCodecContext *avctx)
{
    DecodeFilterContext *filters = &avctx->internal->filter;
    int i;

    for (i = 0; i < filters->nb_bsfs; i++)
        av_bsf_flush(filters->bsfs[i]);
}
2026
/**
 * Reset the decoder's internal state so decoding can restart cleanly
 * (e.g. after a seek): clear draining state and buffered frames/packets,
 * flush the decoder (threaded or not), reset pts correction and flush
 * the bitstream filter chain.
 */
void avcodec_flush_buffers(AVCodecContext *avctx)
{
    avctx->internal->draining      = 0;
    avctx->internal->draining_done = 0;
    avctx->internal->nb_draining_errors = 0;
    av_frame_unref(avctx->internal->buffer_frame);
    av_frame_unref(avctx->internal->compat_decode_frame);
    av_packet_unref(avctx->internal->buffer_pkt);
    avctx->internal->buffer_pkt_valid = 0;

    av_packet_unref(avctx->internal->ds.in_pkt);

    /* frame threading has its own flush path */
    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
        ff_thread_flush(avctx);
    else if (avctx->codec->flush)
        avctx->codec->flush(avctx);

    avctx->pts_correction_last_pts =
    avctx->pts_correction_last_dts = INT64_MIN;

    bsfs_flush(avctx);

    if (!avctx->refcounted_frames)
        av_frame_unref(avctx->internal->to_free);
}
2052
/* Free the decoder's bitstream filter chain and reset its bookkeeping. */
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
{
    DecodeFilterContext *filters = &avctx->internal->filter;
    int i;

    for (i = 0; i < filters->nb_bsfs; i++)
        av_bsf_free(&filters->bsfs[i]);

    av_freep(&filters->bsfs);
    filters->nb_bsfs = 0;
}
2063