1 /*
2 * AV1 video decoder
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include "libavutil/pixdesc.h"
22 #include "avcodec.h"
23 #include "av1dec.h"
24 #include "bytestream.h"
25 #include "hwconfig.h"
26 #include "internal.h"
27 #include "profiles.h"
28
/* Reset the per-frame loop-filter state to the spec defaults, used when
 * the frame has no usable primary reference (setup_past_independence()). */
static void setup_past_independence(AV1Frame *f)
{
    f->loop_filter_delta_enabled = 1;

    /* All reference deltas default to 0 except the four below. */
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++)
        f->loop_filter_ref_deltas[i] = 0;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_INTRA]   =  1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN]  = -1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2] = -1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF]  = -1;

    f->loop_filter_mode_deltas[0] = 0;
    f->loop_filter_mode_deltas[1] = 0;
}
45
/* Inherit loop-filter deltas from the primary reference frame, then apply
 * any per-frame overrides signalled in the current frame header. */
static void load_previous_and_update(AV1DecContext *s)
{
    const AV1RawFrameHeader *hdr = s->raw_frame_header;
    uint8_t prev_idx = hdr->ref_frame_idx[hdr->primary_ref_frame];
    const AV1Frame *prev = &s->ref[prev_idx];

    memcpy(s->cur_frame.loop_filter_ref_deltas, prev->loop_filter_ref_deltas,
           AV1_NUM_REF_FRAMES * sizeof(int8_t));
    memcpy(s->cur_frame.loop_filter_mode_deltas, prev->loop_filter_mode_deltas,
           2 * sizeof(int8_t));

    if (hdr->loop_filter_delta_update) {
        for (int i = 0; i < AV1_NUM_REF_FRAMES; i++)
            if (hdr->update_ref_delta[i])
                s->cur_frame.loop_filter_ref_deltas[i] =
                    hdr->loop_filter_ref_deltas[i];

        for (int i = 0; i < 2; i++)
            if (hdr->update_mode_delta[i])
                s->cur_frame.loop_filter_mode_deltas[i] =
                    hdr->loop_filter_mode_deltas[i];
    }

    s->cur_frame.loop_filter_delta_enabled = hdr->loop_filter_delta_enabled;
}
76
/* Inverse of the recentering mapping: codes up to 2*r alternate around the
 * reference r (odd codes below, even codes above); larger codes pass
 * through unchanged. */
static uint32_t inverse_recenter(int r, uint32_t v)
{
    if (v <= 2 * r)
        return (v & 1) ? r - ((v + 1) >> 1) : r + (v >> 1);
    return v;
}
86
/* Map a sub-exponential code into [0, mx) around reference r. When r sits
 * in the upper half of the range, the range is mirrored so the recentering
 * always happens around the nearer end. */
static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
                                                int mx, int r)
{
    if (2 * r <= mx)
        return inverse_recenter(r, sub_exp);
    return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
}
96
/* Signed variant: decode relative to the shifted range [0, high - low)
 * with reference r - low, then bias the result back by low. */
static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
                                             int high, int r)
{
    return low + (int32_t)decode_unsigned_subexp_with_ref(sub_exp,
                                                          high - low, r - low);
}
103
/**
 * Reconstruct one global-motion parameter for the current frame, coded as
 * a sub-exponential delta against the same parameter of the primary
 * reference frame (AV1 spec, read_global_param()).
 *
 * @param type warp model of this reference (TRANSLATION/ROTZOOM/AFFINE)
 * @param ref  reference-frame slot whose parameters are being read
 * @param idx  parameter index 0..5 (0/1 translation, 2..5 matrix part)
 */
static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
{
    uint8_t primary_frame, prev_frame;
    uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
    int32_t r;

    /* Prediction source: saved parameters of the primary reference frame. */
    primary_frame = s->raw_frame_header->primary_ref_frame;
    prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
    abs_bits = AV1_GM_ABS_ALPHA_BITS;
    prec_bits = AV1_GM_ALPHA_PREC_BITS;

    /* idx 0/1 are the translation components; their bit budget depends on
     * whether the model is translation-only and on MV precision. */
    if (idx < 2) {
        if (type == AV1_WARP_MODEL_TRANSLATION) {
            abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
                !s->raw_frame_header->allow_high_precision_mv;
            prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
                !s->raw_frame_header->allow_high_precision_mv;
        } else {
            abs_bits = AV1_GM_ABS_TRANS_BITS;
            prec_bits = AV1_GM_TRANS_PREC_BITS;
        }
    }
    /* Diagonal matrix entries (idx % 3 == 2, i.e. 2 and 5) are coded
     * around 1.0, hence the extra round/sub terms. */
    round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
    prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
    sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
    mx = 1 << abs_bits;
    r = (s->ref[prev_frame].gm_params[ref][idx] >> prec_diff) - sub;

    /* NOTE(review): left-shifting a negative decoded value by prec_diff is
     * formally UB in C; presumably relies on two's-complement arithmetic
     * shift as elsewhere in FFmpeg — TODO confirm. */
    s->cur_frame.gm_params[ref][idx] =
        (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
                                       -mx, mx + 1, r) << prec_diff) + round;
}
136
137 /**
138 * update gm type/params, since cbs already implemented part of this funcation,
139 * so we don't need to full implement spec.
140 */
global_motion_params(AV1DecContext * s)141 static void global_motion_params(AV1DecContext *s)
142 {
143 const AV1RawFrameHeader *header = s->raw_frame_header;
144 int type, ref;
145
146 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
147 s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
148 for (int i = 0; i < 6; i++)
149 s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
150 1 << AV1_WARPEDMODEL_PREC_BITS : 0;
151 }
152 if (header->frame_type == AV1_FRAME_KEY ||
153 header->frame_type == AV1_FRAME_INTRA_ONLY)
154 return;
155
156 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
157 if (header->is_global[ref]) {
158 if (header->is_rot_zoom[ref]) {
159 type = AV1_WARP_MODEL_ROTZOOM;
160 } else {
161 type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
162 : AV1_WARP_MODEL_AFFINE;
163 }
164 } else {
165 type = AV1_WARP_MODEL_IDENTITY;
166 }
167 s->cur_frame.gm_type[ref] = type;
168
169 if (type >= AV1_WARP_MODEL_ROTZOOM) {
170 read_global_param(s, type, ref, 2);
171 read_global_param(s, type, ref, 3);
172 if (type == AV1_WARP_MODEL_AFFINE) {
173 read_global_param(s, type, ref, 4);
174 read_global_param(s, type, ref, 5);
175 } else {
176 s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
177 s->cur_frame.gm_params[ref][5] = s->cur_frame.gm_params[ref][2];
178 }
179 }
180 if (type >= AV1_WARP_MODEL_TRANSLATION) {
181 read_global_param(s, type, ref, 0);
182 read_global_param(s, type, ref, 1);
183 }
184 }
185 }
186
/* Size s->tile_group_info for the current frame's tile grid. The array
 * only ever grows; on allocation failure the recorded count is reset so a
 * stale size is never trusted. Returns 0 or a negative AVERROR code. */
static int init_tile_data(AV1DecContext *s)
{
    int count = s->raw_frame_header->tile_cols *
                s->raw_frame_header->tile_rows;

    if (count > s->tile_num) {
        int err = av_reallocp_array(&s->tile_group_info, count,
                                    sizeof(TileGroupInfo));
        if (err < 0) {
            s->tile_num = 0;
            return err;
        }
    }
    s->tile_num = count;

    return 0;
}
204
/**
 * Record size, offset and grid position of every tile in one tile group
 * into s->tile_group_info, validating sizes against the payload length.
 *
 * @return 0 on success, AVERROR_INVALIDDATA if a tile size field or tile
 *         payload overruns the tile-group data.
 */
static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
{
    AV1DecContext *s = avctx->priv_data;
    GetByteContext gb;
    uint16_t tile_num, tile_row, tile_col;
    uint32_t size = 0, size_bytes = 0;

    bytestream2_init(&gb, tile_group->tile_data.data,
                     tile_group->tile_data.data_size);
    s->tg_start = tile_group->tg_start;
    s->tg_end = tile_group->tg_end;

    for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
        tile_row = tile_num / s->raw_frame_header->tile_cols;
        tile_col = tile_num % s->raw_frame_header->tile_cols;

        /* The last tile of the group carries no explicit size field: it
         * spans the remainder of the tile-group payload. */
        if (tile_num == tile_group->tg_end) {
            s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
            s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
            s->tile_group_info[tile_num].tile_row = tile_row;
            s->tile_group_info[tile_num].tile_column = tile_col;
            return 0;
        }
        /* tile_size_minus_1 is stored little-endian in
         * tile_size_bytes_minus1 + 1 bytes. */
        size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
        if (bytestream2_get_bytes_left(&gb) < size_bytes)
            return AVERROR_INVALIDDATA;
        size = 0;
        for (int i = 0; i < size_bytes; i++)
            size |= bytestream2_get_byteu(&gb) << 8 * i;
        /* <= because the stored value is size - 1: size itself must fit. */
        if (bytestream2_get_bytes_left(&gb) <= size)
            return AVERROR_INVALIDDATA;
        size++;

        s->tile_group_info[tile_num].tile_size = size;
        s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
        s->tile_group_info[tile_num].tile_row = tile_row;
        s->tile_group_info[tile_num].tile_column = tile_col;

        bytestream2_skipu(&gb, size);
    }

    return 0;
}
249
/**
 * Derive the software pixel format from the sequence header's profile,
 * bit depth and chroma subsampling, then run the hwaccel format
 * negotiation. Since this decoder has no native decoding path, a missing
 * hwaccel is an error.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int get_pixel_format(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    uint8_t bit_depth;
    int ret;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
#define HWACCEL_MAX (0)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;

    /* Profile 2 with high_bitdepth can reach 12 bit; otherwise
     * high_bitdepth selects 10 vs 8 bit. */
    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Unknown AV1 profile %d.\n", seq->seq_profile);
        return AVERROR(EINVAL);
    }

    if (!seq->color_config.mono_chrome) {
        // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
        if (seq->color_config.subsampling_x == 0 &&
            seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV444P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV444P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV444P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV422P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV422P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV422P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 1) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV420P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV420P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV420P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        }
    } else {
        /* Monochrome (4:0:0) carries no chroma planes: map it to the
         * grayscale formats. The previous mapping to YUV440P was wrong,
         * as that format implies chroma planes that do not exist. */
        if (bit_depth == 8)
            pix_fmt = AV_PIX_FMT_GRAY8;
        else if (bit_depth == 10)
            pix_fmt = AV_PIX_FMT_GRAY10;
        else if (bit_depth == 12)
            pix_fmt = AV_PIX_FMT_GRAY12;
        else
            av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
    }

    av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
           av_get_pix_fmt_name(pix_fmt));

    if (pix_fmt == AV_PIX_FMT_NONE)
        return AVERROR(EINVAL);
    s->pix_fmt = pix_fmt;

    *fmtp++ = s->pix_fmt;
    *fmtp = AV_PIX_FMT_NONE;

    ret = ff_thread_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;

    /**
     * check if the HW accel is inited correctly. If not, return un-implemented.
     * Since now the av1 decoder doesn't support native decode, if it will be
     * implemented in the future, need remove this check.
     */
    if (!avctx->hwaccel) {
        av_log(avctx, AV_LOG_ERROR, "Your platform doesn't support"
               " hardware accelerated AV1 decoding.\n");
        return AVERROR(ENOSYS);
    }

    avctx->pix_fmt = ret;

    return 0;
}
340
/**
 * Release the picture buffer held by f and drop its hwaccel private data
 * reference, clearing the dangling private-data pointer.
 */
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
    ff_thread_release_buffer(avctx, &f->tf);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->hwaccel_picture_private = NULL;
}
347
/**
 * Make dst a new reference to src: the picture buffer, the hwaccel
 * private data, and the per-frame decoding state (loop-filter deltas and
 * global-motion parameters) are all copied/re-referenced.
 *
 * @return 0 on success; a negative error from ff_thread_ref_frame(), or
 *         AVERROR(ENOMEM) (with dst unreferenced) if the hwaccel buffer
 *         reference fails.
 */
static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    /* Copy the decoding state so dst can serve as a prediction source. */
    dst->loop_filter_delta_enabled = src->loop_filter_delta_enabled;
    memcpy(dst->loop_filter_ref_deltas,
           src->loop_filter_ref_deltas,
           AV1_NUM_REF_FRAMES * sizeof(int8_t));
    memcpy(dst->loop_filter_mode_deltas,
           src->loop_filter_mode_deltas,
           2 * sizeof(int8_t));
    memcpy(dst->gm_type,
           src->gm_type,
           AV1_NUM_REF_FRAMES * sizeof(uint8_t));
    memcpy(dst->gm_params,
           src->gm_params,
           AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));

    return 0;

fail:
    av1_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}
383
/**
 * Free all decoder state: reference and current frames, parsed-header
 * buffer references, tile bookkeeping, and the CBS context.
 */
static av_cold int av1_decode_free(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;

    /* Unref each picture before freeing its AVFrame shell. */
    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        av1_frame_unref(avctx, &s->ref[i]);
        av_frame_free(&s->ref[i].tf.f);
    }
    av1_frame_unref(avctx, &s->cur_frame);
    av_frame_free(&s->cur_frame.tf.f);

    av_buffer_unref(&s->seq_ref);
    av_buffer_unref(&s->header_ref);
    av_freep(&s->tile_group_info);

    ff_cbs_fragment_free(&s->current_obu);
    ff_cbs_close(&s->cbc);

    return 0;
}
404
/**
 * Export static codec parameters from the sequence header to the codec
 * context: profile/level, color description, chroma siting, dimensions,
 * SAR and (when timing info is present) the nominal frame rate.
 *
 * @return 0 on success, a negative error code from ff_set_dimensions().
 */
static int set_context_with_sequence(AVCodecContext *avctx,
                                     const AV1RawSequenceHeader *seq)
{
    int width = seq->max_frame_width_minus_1 + 1;
    int height = seq->max_frame_height_minus_1 + 1;

    avctx->profile = seq->seq_profile;
    avctx->level = seq->seq_level_idx[0];

    avctx->color_range =
        seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    avctx->color_primaries = seq->color_config.color_primaries;
    /* Bug fix: colorspace comes from matrix_coefficients, not from
     * color_primaries (both are H.273 code points, but distinct ones). */
    avctx->colorspace = seq->color_config.matrix_coefficients;
    avctx->color_trc = seq->color_config.transfer_characteristics;

    switch (seq->color_config.chroma_sample_position) {
    case AV1_CSP_VERTICAL:
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case AV1_CSP_COLOCATED:
        avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }

    if (avctx->width != width || avctx->height != height) {
        int ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }
    avctx->sample_aspect_ratio = (AVRational) { 1, 1 };

    if (seq->timing_info.num_units_in_display_tick &&
        seq->timing_info.time_scale) {
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  seq->timing_info.num_units_in_display_tick,
                  seq->timing_info.time_scale,
                  INT_MAX);
        if (seq->timing_info.equal_picture_interval)
            avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
    }

    return 0;
}
448
/* Sync the codec context with the current frame header: coded dimensions
 * and the sample aspect ratio implied by the render dimensions. Returns 0
 * or a negative error code. */
static int update_context_with_frame_header(AVCodecContext *avctx,
                                            const AV1RawFrameHeader *header)
{
    int width    = header->frame_width_minus_1   + 1;
    int height   = header->frame_height_minus_1  + 1;
    int r_width  = header->render_width_minus_1  + 1;
    int r_height = header->render_height_minus_1 + 1;
    AVRational sar;
    int ret;

    if (width != avctx->width || height != avctx->height) {
        ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }

    /* SAR such that the coded picture scales to the render dimensions. */
    av_reduce(&sar.num, &sar.den,
              (int64_t)height * r_width,
              (int64_t)width * r_height,
              INT_MAX);

    if (av_cmp_q(avctx->sample_aspect_ratio, sar)) {
        ret = ff_set_sar(avctx, sar);
        if (ret < 0)
            return ret;
    }

    return 0;
}
478
/**
 * Decoder init: allocate the AVFrame shells for all reference slots and
 * the current frame, create the CBS context, and — if extradata is
 * present — pre-parse it for a sequence header to seed the context.
 */
static av_cold int av1_decode_init(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawSequenceHeader *seq;
    int ret;

    s->avctx = avctx;
    s->pix_fmt = AV_PIX_FMT_NONE;

    /* Frame shells only; picture buffers are attached during decoding. */
    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        s->ref[i].tf.f = av_frame_alloc();
        if (!s->ref[i].tf.f) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference frame buffer %d.\n", i);
            return AVERROR(ENOMEM);
        }
    }

    s->cur_frame.tf.f = av_frame_alloc();
    if (!s->cur_frame.tf.f) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate current frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
    if (ret < 0)
        return ret;

    if (avctx->extradata && avctx->extradata_size) {
        ret = ff_cbs_read(s->cbc, &s->current_obu, avctx->extradata,
                          avctx->extradata_size);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
            return ret;
        }

        /* A missing sequence header in extradata is only a warning: it
         * can still arrive in-band with the first packet. */
        seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
        if (!seq) {
            av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
            goto end;
        }

        ret = set_context_with_sequence(avctx, seq);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
            goto end;
        }

end:
        ff_cbs_fragment_reset(&s->current_obu);
    }

    return ret;
}
534
/**
 * Prepare the picture for the current frame: sync the codec context with
 * the frame header, acquire a frame buffer, set picture-type fields and
 * allocate the hwaccel's per-frame private data if it requests any.
 *
 * @return 0 on success, a negative error code on failure (f unreferenced
 *         on hwaccel allocation failure).
 */
static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawFrameHeader *header = s->raw_frame_header;
    AVFrame *frame;
    int ret;

    ret = update_context_with_frame_header(avctx, header);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
        return ret;
    }

    if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    frame = f->tf.f;
    frame->key_frame = header->frame_type == AV1_FRAME_KEY;

    /* Map the AV1 frame type onto the generic picture types. */
    switch (header->frame_type) {
    case AV1_FRAME_KEY:
    case AV1_FRAME_INTRA_ONLY:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case AV1_FRAME_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case AV1_FRAME_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    }

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf =
                av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }
    return 0;

fail:
    av1_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}
583
/* Emit the current frame to the caller: reference it into the output
 * frame, carry over the packet timing, and flag got_frame. Returns 0 or a
 * negative error code from av_frame_ref(). */
static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
                            const AVPacket *pkt, int *got_frame)
{
    AV1DecContext *s = avctx->priv_data;
    int ret = av_frame_ref(frame, s->cur_frame.tf.f);

    if (ret < 0)
        return ret;

    frame->pts      = pkt->pts;
    frame->pkt_dts  = pkt->dts;
    frame->pkt_size = pkt->size;

    *got_frame = 1;

    return 0;
}
603
/* Store the just-decoded frame into every reference slot selected by
 * refresh_frame_flags, releasing whatever the slot held before. Returns 0
 * or a negative error code. */
static int update_reference_list(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        int ret;

        if (!(header->refresh_frame_flags & (1 << i)))
            continue;

        if (s->ref[i].tf.f->buf[0])
            av1_frame_unref(avctx, &s->ref[i]);
        ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to update frame %d in reference list\n", i);
            return ret;
        }
    }
    return 0;
}
623
/**
 * Begin decoding a new frame: allocate the current picture, size the tile
 * bookkeeping, and derive the per-frame state (loop-filter deltas, global
 * motion) either from spec defaults or from the primary reference frame.
 *
 * @return 0 on success, a negative error code on failure.
 */
static int get_current_frame(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    int ret;

    if (s->cur_frame.tf.f->buf[0])
        av1_frame_unref(avctx, &s->cur_frame);

    ret = av1_frame_alloc(avctx, &s->cur_frame);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate space for current frame.\n");
        return ret;
    }

    ret = init_tile_data(s);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
        return ret;
    }

    /* No usable primary reference: reset state to the spec defaults. */
    if (s->raw_frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE)
        setup_past_independence(&s->cur_frame);
    else
        load_previous_and_update(s);

    global_motion_params(s);

    return ret;
}
654
/**
 * Decode one packet: parse its OBUs with CBS, drive the hwaccel through
 * start_frame/decode_slice/end_frame, and output a picture when one
 * becomes ready (show_frame, or show_existing_frame re-output).
 */
static int av1_decode_frame(AVCodecContext *avctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawTileGroup *raw_tile_group = NULL;
    int ret;

    ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
        goto end;
    }
    av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
           s->current_obu.nb_units);

    for (int i = 0; i < s->current_obu.nb_units; i++) {
        CodedBitstreamUnit *unit = &s->current_obu.units[i];
        AV1RawOBU *obu = unit->content;
        av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);

        switch (unit->type) {
        case AV1_OBU_SEQUENCE_HEADER:
            /* Keep a reference to the parsed header so raw_seq stays
             * valid across packets. */
            av_buffer_unref(&s->seq_ref);
            s->seq_ref = av_buffer_ref(unit->content_ref);
            if (!s->seq_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            s->raw_seq = &obu->obu.sequence_header;

            ret = set_context_with_sequence(avctx, s->raw_seq);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
                goto end;
            }

            /* Format (and hwaccel) negotiation runs only once, on the
             * first sequence header seen. */
            if (s->pix_fmt == AV_PIX_FMT_NONE) {
                ret = get_pixel_format(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Failed to get pixel format.\n");
                    goto end;
                }
            }

            if (avctx->hwaccel && avctx->hwaccel->decode_params) {
                ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
                                                    unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
                    goto end;
                }
            }
            break;
        case AV1_OBU_REDUNDANT_FRAME_HEADER:
            /* Only used when no frame header has been seen yet. */
            if (s->raw_frame_header)
                break;
            // fall-through
        case AV1_OBU_FRAME:
        case AV1_OBU_FRAME_HEADER:
            if (!s->raw_seq) {
                av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            /* Keep the backing buffer alive while raw_frame_header points
             * into it. */
            av_buffer_unref(&s->header_ref);
            s->header_ref = av_buffer_ref(unit->content_ref);
            if (!s->header_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                s->raw_frame_header = &obu->obu.frame.header;
            else
                s->raw_frame_header = &obu->obu.frame_header;

            /* show_existing_frame: re-output a previously decoded
             * reference picture without decoding anything new. */
            if (s->raw_frame_header->show_existing_frame) {
                if (s->cur_frame.tf.f->buf[0])
                    av1_frame_unref(avctx, &s->cur_frame);

                ret = av1_frame_ref(avctx, &s->cur_frame,
                                    &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
                    goto end;
                }

                ret = update_reference_list(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                    goto end;
                }

                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0)
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");

                s->raw_frame_header = NULL;

                goto end;
            }

            ret = get_current_frame(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
                goto end;
            }

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->start_frame(avctx, unit->data,
                                                  unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
                    goto end;
                }
            }
            /* An AV1_OBU_FRAME carries its tile group inline, so continue
             * directly into the tile-group handling. */
            if (unit->type != AV1_OBU_FRAME)
                break;
            // fall-through
        case AV1_OBU_TILE_GROUP:
            if (!s->raw_frame_header) {
                av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                raw_tile_group = &obu->obu.frame.tile_group;
            else
                raw_tile_group = &obu->obu.tile_group;

            ret = get_tiles_info(avctx, raw_tile_group);
            if (ret < 0)
                goto end;

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->decode_slice(avctx,
                                                   raw_tile_group->tile_data.data,
                                                   raw_tile_group->tile_data.data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "HW accel decode slice fail.\n");
                    goto end;
                }
            }
            break;
        case AV1_OBU_TILE_LIST:
        case AV1_OBU_TEMPORAL_DELIMITER:
        case AV1_OBU_PADDING:
        case AV1_OBU_METADATA:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG,
                   "Unknown obu type: %d (%"SIZE_SPECIFIER" bits).\n",
                   unit->type, unit->data_size);
        }

        /* Last tile of the frame received: finish hwaccel decoding,
         * refresh the reference slots, and output the picture if shown. */
        if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
            if (avctx->hwaccel) {
                ret = avctx->hwaccel->end_frame(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
                    goto end;
                }
            }

            ret = update_reference_list(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                goto end;
            }

            if (s->raw_frame_header->show_frame) {
                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
                    goto end;
                }
            }
            raw_tile_group = NULL;
            s->raw_frame_header = NULL;
        }
    }

end:
    ff_cbs_fragment_reset(&s->current_obu);
    return ret;
}
846
/* Flush callback: drop every held picture and any partially accumulated
 * header state, then reset the CBS parser. */
static void av1_decode_flush(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
        av1_frame_unref(avctx, &s->ref[i]);
    av1_frame_unref(avctx, &s->cur_frame);

    s->raw_frame_header = NULL;
    s->raw_seq          = NULL;

    ff_cbs_flush(s->cbc);
}
860
/* AV1 decoder entry. Decoding itself is delegated to hwaccels (see the
 * ENOSYS check in get_pixel_format); there is no native software path. */
AVCodec ff_av1_decoder = {
    .name           = "av1",
    .long_name      = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .priv_data_size = sizeof(AV1DecContext),
    .init           = av1_decode_init,
    .close          = av1_decode_free,
    .decode         = av1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP |
                      FF_CODEC_CAP_SETS_PKT_DTS,
    .flush          = av1_decode_flush,
    .profiles       = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
    /* NULL-terminated hwaccel configuration list (none listed here). */
    .hw_configs     = (const AVCodecHWConfigInternal * []) {
        NULL
    },
};
880