1 /* GStreamer
2 * Copyright (C) 2008 David Schleef <ds@schleef.org>
3 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
4 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
5 * Contact: Stefan Kost <stefan.kost@nokia.com>
6 * Copyright (C) 2012 Collabora Ltd.
7 * Author : Edward Hervey <edward@collabora.com>
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
18 *
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
23 */
24
25 /**
26 * SECTION:gstvideodecoder
27 * @title: GstVideoDecoder
28 * @short_description: Base class for video decoders
29 * @see_also:
30 *
31 * This base class is for video decoders turning encoded data into raw video
32 * frames.
33 *
34 * The GstVideoDecoder base class and derived subclasses should cooperate as
35 * follows:
36 *
37 * ## Configuration
38 *
39 * * Initially, GstVideoDecoder calls @start when the decoder element
40 * is activated, which allows the subclass to perform any global setup.
41 *
42 * * GstVideoDecoder calls @set_format to inform the subclass of caps
43 * describing input video data that it is about to receive, including
44 * possibly configuration data.
45 * While unlikely, it might be called more than once, if changing input
46 * parameters require reconfiguration.
47 *
48 * * Incoming data buffers are processed as needed, described in Data
49 * Processing below.
50 *
51 * * GstVideoDecoder calls @stop at end of all processing.
52 *
53 * ## Data processing
54 *
55 * * The base class gathers input data, and optionally allows subclass
56 * to parse this into subsequently manageable chunks, typically
57 * corresponding to and referred to as 'frames'.
58 *
59 * * Each input frame is provided in turn to the subclass' @handle_frame
60 * callback.
61 * The ownership of the frame is given to the @handle_frame callback.
62 *
63 * * If codec processing results in decoded data, the subclass should call
64 * @gst_video_decoder_finish_frame to have decoded data pushed.
65 * downstream. Otherwise, the subclass must call
66 * @gst_video_decoder_drop_frame, to allow the base class to do timestamp
67 * and offset tracking, and possibly to requeue the frame for a later
68 * attempt in the case of reverse playback.
69 *
70 * ## Shutdown phase
71 *
72 * * The GstVideoDecoder class calls @stop to inform the subclass that data
73 * parsing will be stopped.
74 *
75 * ## Additional Notes
76 *
77 * * Seeking/Flushing
78 *
79 * * When the pipeline is seeked or otherwise flushed, the subclass is
80 * informed via a call to its @reset callback, with the hard parameter
81 * set to true. This indicates the subclass should drop any internal data
82 * queues and timestamps and prepare for a fresh set of buffers to arrive
83 * for parsing and decoding.
84 *
85 * * End Of Stream
86 *
 *   * At end-of-stream, the subclass @parse function may be called some final
 *     times with the at_eos parameter set to true, indicating that the element
 *     should not expect any more data to be arriving, and it should parse any
 *     remaining frames and call gst_video_decoder_have_frame() if possible.
91 *
92 * The subclass is responsible for providing pad template caps for
93 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
95 * This may be when the base class calls the subclass' @set_format function,
96 * though it might be during decoding, before calling
97 * @gst_video_decoder_finish_frame. This is done via
98 * @gst_video_decoder_set_output_state
99 *
100 * The subclass is also responsible for providing (presentation) timestamps
101 * (likely based on corresponding input ones). If that is not applicable
102 * or possible, the base class provides limited framerate based interpolation.
103 *
104 * Similarly, the base class provides some limited (legacy) seeking support
105 * if specifically requested by the subclass, as full-fledged support
106 * should rather be left to upstream demuxer, parser or alike. This simple
107 * approach caters for seeking and duration reporting using estimated input
108 * bitrates. To enable it, a subclass should call
109 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
110 * byte-streams.
111 *
112 * The base class provides some support for reverse playback, in particular
113 * in case incoming data is not packetized or upstream does not provide
114 * fragments on keyframe boundaries. However, the subclass should then be
115 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
118 * keyframes, unless it knows the upstream elements will do so properly for
119 * incoming data.
120 *
121 * The bare minimum that a functional subclass needs to implement is:
122 *
123 * * Provide pad templates
124 * * Inform the base class of output caps via
125 * @gst_video_decoder_set_output_state
126 *
127 * * Parse input data, if it is not considered packetized from upstream
128 * Data will be provided to @parse which should invoke
129 * @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
130 * separate the data belonging to each video frame.
131 *
132 * * Accept data in @handle_frame and provide decoded results to
133 * @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
134 */
135
136 #ifdef HAVE_CONFIG_H
137 #include "config.h"
138 #endif
139
140 /* TODO
141 *
142 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
143 * features, like applying QoS on input (as opposed to after the frame is
144 * decoded).
145 * * Add a flag/boolean for decoders that require keyframes, so the base
146 * class can automatically discard non-keyframes before one has arrived
147 * * Detect reordered frame/timestamps and fix the pts/dts
148 * * Support for GstIndex (or shall we not care ?)
149 * * Calculate actual latency based on input/output timestamp/frame_number
150 * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
151 * * Emit latency message when it changes
152 *
153 */
154
155 /* Implementation notes:
156 * The Video Decoder base class operates in 2 primary processing modes, depending
157 * on whether forward or reverse playback is requested.
158 *
159 * Forward playback:
160 * * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
161 * handle_frame() -> push downstream
162 *
163 * Reverse playback is more complicated, since it involves gathering incoming
164 * data regions as we loop backwards through the upstream data. The processing
165 * concept (using incoming buffers as containing one frame each to simplify
166 * things) is:
167 *
168 * Upstream data we want to play:
169 * Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
170 * Keyframe flag: K K
171 * Groupings: AAAAAAA BBBBBBB CCCCCCC
172 *
173 * Input:
174 * Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
175 * Keyframe flag: K K
176 * Discont flag: D D D
177 *
178 * - Each Discont marks a discont in the decoding order.
179 * - The keyframes mark where we can start decoding.
180 *
181 * Initially, we prepend incoming buffers to the gather queue. Whenever the
182 * discont flag is set on an incoming buffer, the gather queue is flushed out
183 * before the new buffer is collected.
184 *
185 * The above data will be accumulated in the gather queue like this:
186 *
187 * gather queue: 9 8 7
188 * D
189 *
190 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
191 * this:
192 *
193 * while (gather)
194 * take head of queue and prepend to parse queue (this reverses the
195 * sequence, so parse queue is 7 -> 8 -> 9)
196 *
197 * Next, we process the parse queue, which now contains all un-parsed packets
198 * (including any leftover ones from the previous decode section)
199 *
200 * for each buffer now in the parse queue:
201 * Call the subclass parse function, prepending each resulting frame to
202 * the parse_gather queue. Buffers which precede the first one that
203 * produces a parsed frame are retained in the parse queue for
204 * re-processing on the next cycle of parsing.
205 *
206 * The parse_gather queue now contains frame objects ready for decoding,
207 * in reverse order.
208 * parse_gather: 9 -> 8 -> 7
209 *
210 * while (parse_gather)
211 * Take the head of the queue and prepend it to the decode queue
212 * If the frame was a keyframe, process the decode queue
213 * decode is now 7-8-9
214 *
215 * Processing the decode queue results in frames with attached output buffers
216 * stored in the 'output_queue' ready for outputting in reverse order.
217 *
218 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
219 * gather queue. We get the following situation:
220 *
221 * gather queue: 4
222 * decode queue: 7 8 9
223 *
224 * After we received 5 (Keyframe) and 6:
225 *
226 * gather queue: 6 5 4
227 * decode queue: 7 8 9
228 *
229 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
230 *
231 * Copy head of the gather queue (6) to decode queue:
232 *
233 * gather queue: 5 4
234 * decode queue: 6 7 8 9
235 *
236 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
237 * can start decoding.
238 *
239 * gather queue: 4
240 * decode queue: 5 6 7 8 9
241 *
242 * Decode frames in decode queue, store raw decoded data in output queue, we
243 * can take the head of the decode queue and prepend the decoded result in the
244 * output queue:
245 *
246 * gather queue: 4
247 * decode queue:
248 * output queue: 9 8 7 6 5
249 *
250 * Now output all the frames in the output queue, picking a frame from the
251 * head of the queue.
252 *
253 * Copy head of the gather queue (4) to decode queue, we flushed the gather
254 * queue and can now store input buffer in the gather queue:
255 *
256 * gather queue: 1
257 * decode queue: 4
258 *
259 * When we receive EOS, the queue looks like:
260 *
261 * gather queue: 3 2 1
262 * decode queue: 4
263 *
264 * Fill decode queue, first keyframe we copy is 2:
265 *
266 * gather queue: 1
267 * decode queue: 2 3 4
268 *
269 * Decoded output:
270 *
271 * gather queue: 1
272 * decode queue:
273 * output queue: 4 3 2
274 *
275 * Leftover buffer 1 cannot be decoded and must be discarded.
276 */
277
278 #include "gstvideodecoder.h"
279 #include "gstvideoutils.h"
280 #include "gstvideoutilsprivate.h"
281
282 #include <gst/video/video.h>
283 #include <gst/video/video-event.h>
284 #include <gst/video/gstvideopool.h>
285 #include <gst/video/gstvideometa.h>
286 #include <string.h>
287
288 GST_DEBUG_CATEGORY (videodecoder_debug);
289 #define GST_CAT_DEFAULT videodecoder_debug
290
struct _GstVideoDecoderPrivate
{
  /* FIXME introduce a context ? */

  /* output buffer allocation objects (owned; released in finalize) */
  GstBufferPool *pool;
  GstAllocator *allocator;
  GstAllocationParams params;

  /* parse tracking */
  /* input data */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */
  gboolean packetized;

  /* Error handling */
  gint max_errors;
  gint error_count;
  gboolean had_output_data;
  gboolean had_input_data;

  gboolean needs_format;
  /* whether input_segment and output_segment are identical */
  gboolean in_out_segment_sync;

  /* ... being tracked here;
   * only available during parsing */
  GstVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */
  GList *timestamps;

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* reverse playback */
  /* collect input */
  GList *gather;
  /* to-be-parsed */
  GList *parse;
  /* collected parsed frames */
  GList *parse_gather;
  /* frames to be handled == decoded */
  GList *decode;
  /* collected output - of buffer objects, not frames */
  GList *output_queued;


  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
  GstClockTime base_timestamp;

  /* FIXME : reorder_depth is never set */
  int reorder_depth;
  int distance_from_sync;

  guint32 system_frame_number;
  guint32 decode_frame_number;

  GList *frames;                /* Protected with OBJECT_LOCK */
  GstVideoCodecState *input_state;
  GstVideoCodecState *output_state;     /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS properties */
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  gboolean discont;
  /* qos messages: frames dropped/processed */
  guint dropped;
  guint processed;

  /* Outgoing byte size ? */
  gint64 bytes_out;
  gint64 time;

  gint64 min_latency;
  gint64 max_latency;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *tags;
  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  /* flags */
  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
#endif
};
407
408 static GstElementClass *parent_class = NULL;
409 static gint private_offset = 0;
410
411 static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
412 static void gst_video_decoder_init (GstVideoDecoder * dec,
413 GstVideoDecoderClass * klass);
414
415 static void gst_video_decoder_finalize (GObject * object);
416
417 static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
418 GstCaps * caps);
419 static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
420 GstEvent * event);
421 static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
422 GstEvent * event);
423 static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
424 GstBuffer * buf);
425 static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
426 GstQuery * query);
427 static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
428 element, GstStateChange transition);
429 static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
430 GstQuery * query);
431 static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
432 gboolean flush_hard);
433
434 static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
435 GstVideoCodecFrame * frame);
436
437 static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
438 GList * events);
439 static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
440 decoder, GstVideoCodecFrame * frame);
441 static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
442 decoder);
443 static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
444 decoder, GstBuffer * buf);
445 static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
446 gboolean at_eos);
447
448 static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
449
450 static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
451 GstEvent * event);
452 static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
453 GstEvent * event);
454 static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
455 decoder, GstQuery * query);
456 static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
457 decoder, GstQuery * query);
458 static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
459 static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
460 gboolean at_eos, gboolean new_buffer);
461 static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
462 decoder);
463 static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
464 GstQuery * query);
465 static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
466 GstQuery * query);
467
468 static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
469 decoder, GstVideoCodecFrame * frame, GstMeta * meta);
470
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
GType
gst_video_decoder_get_type (void)
{
  /* one-time, thread-safe registration guarded by g_once_init_enter/leave */
  static volatile gsize type = 0;

  if (g_once_init_enter (&type)) {
    GType _type;
    static const GTypeInfo info = {
      sizeof (GstVideoDecoderClass),
      NULL,                     /* base_init */
      NULL,                     /* base_finalize */
      (GClassInitFunc) gst_video_decoder_class_init,
      NULL,                     /* class_finalize */
      NULL,                     /* class_data */
      sizeof (GstVideoDecoder),
      0,                        /* n_preallocs */
      (GInstanceInitFunc) gst_video_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);

    /* register the private struct; the offset is adjusted in class_init */
    private_offset =
        g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));

    g_once_init_leave (&type, _type);
  }
  return type;
}
502
/* Resolve the per-instance private data area using the offset registered
 * in gst_video_decoder_get_type() */
static inline GstVideoDecoderPrivate *
gst_video_decoder_get_instance_private (GstVideoDecoder * self)
{
  return (G_STRUCT_MEMBER_P (self, private_offset));
}
508
/* Class initializer: wires up GObject/GstElement vmethods and installs the
 * default implementations of the overridable GstVideoDecoder vfuncs */
static void
gst_video_decoder_class_init (GstVideoDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  /* adjust the private-struct offset registered in get_type() */
  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_video_decoder_finalize;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);

  /* default implementations; subclasses may override any of these */
  klass->sink_event = gst_video_decoder_sink_event_default;
  klass->src_event = gst_video_decoder_src_event_default;
  klass->decide_allocation = gst_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_video_decoder_propose_allocation_default;
  klass->negotiate = gst_video_decoder_negotiate_default;
  klass->sink_query = gst_video_decoder_sink_query_default;
  klass->src_query = gst_video_decoder_src_query_default;
  klass->transform_meta = gst_video_decoder_transform_meta_default;
}
540
/* Instance initializer: creates sink/src pads from the subclass' pad
 * templates (which must be named "sink" and "src"), installs pad functions,
 * initializes segments, the stream lock, the parse adapters and defaults,
 * then performs a full hard reset */
static void
gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
{
  GstPadTemplate *pad_template;
  GstPad *pad;

  GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");

  decoder->priv = gst_video_decoder_get_instance_private (decoder);

  /* subclass is required to provide a "sink" pad template */
  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

  /* subclass is required to provide a "src" pad template */
  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  /* defaults: packetized input, no caps required before data */
  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  gst_video_decoder_reset (decoder, TRUE, TRUE);
}
591
592 static GstVideoCodecState *
_new_input_state(GstCaps * caps)593 _new_input_state (GstCaps * caps)
594 {
595 GstVideoCodecState *state;
596 GstStructure *structure;
597 const GValue *codec_data;
598
599 state = g_slice_new0 (GstVideoCodecState);
600 state->ref_count = 1;
601 gst_video_info_init (&state->info);
602 if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
603 goto parse_fail;
604 state->caps = gst_caps_ref (caps);
605
606 structure = gst_caps_get_structure (caps, 0);
607
608 codec_data = gst_structure_get_value (structure, "codec_data");
609 if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
610 state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
611
612 return state;
613
614 parse_fail:
615 {
616 g_slice_free (GstVideoCodecState, state);
617 return NULL;
618 }
619 }
620
/* Build a new output GstVideoCodecState (ref_count 1) for the given format,
 * interlace mode and dimensions. If @reference is non-NULL, metadata not
 * implied by fmt/width/height (colorimetry, par, fps, multiview, ...) is
 * copied over from it. Returns NULL if the format/size combination is
 * rejected by gst_video_info_set_interlaced_format(). */
static GstVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width,
    guint height, GstVideoCodecState * reference)
{
  GstVideoCodecState *state;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, mode, width,
          height)) {
    g_slice_free (GstVideoCodecState, state);
    return NULL;
  }

  if (reference) {
    GstVideoInfo *tgt, *ref;

    tgt = &state->info;
    ref = &reference->info;

    /* Copy over extra fields from reference state */
    /* NOTE(review): this overwrites the interlace mode just set from the
     * @mode argument with the reference's — confirm this is intended */
    tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    /* only copy values that are not unknown so that we don't override the
     * defaults. subclasses should really fill these in when they know. */
    if (ref->chroma_site)
      tgt->chroma_site = ref->chroma_site;
    if (ref->colorimetry.range)
      tgt->colorimetry.range = ref->colorimetry.range;
    if (ref->colorimetry.matrix)
      tgt->colorimetry.matrix = ref->colorimetry.matrix;
    if (ref->colorimetry.transfer)
      tgt->colorimetry.transfer = ref->colorimetry.transfer;
    if (ref->colorimetry.primaries)
      tgt->colorimetry.primaries = ref->colorimetry.primaries;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
    } else {
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
    }
  }

  GST_DEBUG ("reference par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  return state;
}
684
/* Handle new input caps on the sink pad: skip if unchanged, otherwise build
 * a new input state and offer it to the subclass via @set_format.
 * Takes and releases the STREAM_LOCK. Returns FALSE if the caps cannot be
 * parsed or the subclass refuses them. */
static gboolean
gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *decoder_class;
  GstVideoCodecState *state;
  gboolean ret = TRUE;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* identical caps need no reconfiguration */
  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;
  }

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))
    goto parse_fail;

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  if (!ret)
    goto refused_format;

  /* subclass accepted: the new state replaces (and releases) the old one */
  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;

caps_not_changed:
  {
    GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return TRUE;
  }

  /* ERRORS */
parse_fail:
  {
    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return FALSE;
  }

refused_format:
  {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
    gst_video_codec_state_unref (state);
    return FALSE;
  }
}
748
749 static void
gst_video_decoder_finalize(GObject * object)750 gst_video_decoder_finalize (GObject * object)
751 {
752 GstVideoDecoder *decoder;
753
754 decoder = GST_VIDEO_DECODER (object);
755
756 GST_DEBUG_OBJECT (object, "finalize");
757
758 g_rec_mutex_clear (&decoder->stream_lock);
759
760 if (decoder->priv->input_adapter) {
761 g_object_unref (decoder->priv->input_adapter);
762 decoder->priv->input_adapter = NULL;
763 }
764 if (decoder->priv->output_adapter) {
765 g_object_unref (decoder->priv->output_adapter);
766 decoder->priv->output_adapter = NULL;
767 }
768
769 if (decoder->priv->input_state)
770 gst_video_codec_state_unref (decoder->priv->input_state);
771 if (decoder->priv->output_state)
772 gst_video_codec_state_unref (decoder->priv->output_state);
773
774 if (decoder->priv->pool) {
775 gst_object_unref (decoder->priv->pool);
776 decoder->priv->pool = NULL;
777 }
778
779 if (decoder->priv->allocator) {
780 gst_object_unref (decoder->priv->allocator);
781 decoder->priv->allocator = NULL;
782 }
783
784 G_OBJECT_CLASS (parent_class)->finalize (object);
785 }
786
787 /* hard == FLUSH, otherwise discont */
788 static GstFlowReturn
gst_video_decoder_flush(GstVideoDecoder * dec,gboolean hard)789 gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
790 {
791 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
792 GstFlowReturn ret = GST_FLOW_OK;
793
794 GST_LOG_OBJECT (dec, "flush hard %d", hard);
795
796 /* Inform subclass */
797 if (klass->reset) {
798 GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
799 klass->reset (dec, hard);
800 }
801
802 if (klass->flush)
803 klass->flush (dec);
804
805 /* and get (re)set for the sequel */
806 gst_video_decoder_reset (dec, FALSE, hard);
807
808 return ret;
809 }
810
811 static GstEvent *
gst_video_decoder_create_merged_tags_event(GstVideoDecoder * dec)812 gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
813 {
814 GstTagList *merged_tags;
815
816 GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
817 GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
818 GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);
819
820 merged_tags =
821 gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
822 dec->priv->tags_merge_mode);
823
824 GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
825
826 if (merged_tags == NULL)
827 return NULL;
828
829 if (gst_tag_list_is_empty (merged_tags)) {
830 gst_tag_list_unref (merged_tags);
831 return NULL;
832 }
833
834 return gst_event_new_tag (merged_tags);
835 }
836
/* Push @event on the source pad, intercepting TIME segments to keep the
 * output_segment (and related bookkeeping) up to date.
 * Consumes the caller's reference to @event; returns the pad push result. */
static gboolean
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);

      /* only TIME segments update our output tracking; others pass through */
      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
        break;
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      /* remember whether input and output segments still match */
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      /* new segment invalidates outgoing timestamp and QoS tracking */
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
}
872
/* Repeatedly hand the input adapter's contents to the subclass' parse()
 * until it either errors out or stops making progress. @at_eos is forwarded
 * to parse(); @new_buffer forces at least one parse iteration even if the
 * adapter is currently empty. Returns the last parse() flow return, or
 * GST_FLOW_ERROR if the subclass repeatedly consumed nothing. */
static GstFlowReturn
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;           /* consecutive iterations without progress */

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}
921
922 /* This function has to be called with the stream lock taken. */
923 static GstFlowReturn
gst_video_decoder_drain_out(GstVideoDecoder * dec,gboolean at_eos)924 gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
925 {
926 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
927 GstVideoDecoderPrivate *priv = dec->priv;
928 GstFlowReturn ret = GST_FLOW_OK;
929
930 if (dec->input_segment.rate > 0.0) {
931 /* Forward mode, if unpacketized, give the child class
932 * a final chance to flush out packets */
933 if (!priv->packetized) {
934 ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
935 }
936
937 if (at_eos) {
938 if (decoder_class->finish)
939 ret = decoder_class->finish (dec);
940 } else {
941 if (decoder_class->drain) {
942 ret = decoder_class->drain (dec);
943 } else {
944 GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
945 }
946 }
947 } else {
948 /* Reverse playback mode */
949 ret = gst_video_decoder_flush_parse (dec, TRUE);
950 }
951
952 return ret;
953 }
954
955 static GList *
_flush_events(GstPad * pad,GList * events)956 _flush_events (GstPad * pad, GList * events)
957 {
958 GList *tmp;
959
960 for (tmp = events; tmp; tmp = tmp->next) {
961 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
962 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
963 GST_EVENT_IS_STICKY (tmp->data)) {
964 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
965 }
966 gst_event_unref (tmp->data);
967 }
968 g_list_free (events);
969
970 return NULL;
971 }
972
973 /* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
974 static gboolean
gst_video_decoder_negotiate_default_caps(GstVideoDecoder * decoder)975 gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
976 {
977 GstCaps *caps, *templcaps;
978 GstVideoCodecState *state;
979 GstVideoInfo info;
980 gint i;
981 gint caps_size;
982 GstStructure *structure;
983
984 templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
985 caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
986 if (caps)
987 gst_caps_unref (templcaps);
988 else
989 caps = templcaps;
990 templcaps = NULL;
991
992 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
993 goto caps_error;
994
995 GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
996
997 /* before fixating, try to use whatever upstream provided */
998 caps = gst_caps_make_writable (caps);
999 caps_size = gst_caps_get_size (caps);
1000 if (decoder->priv->input_state && decoder->priv->input_state->caps) {
1001 GstCaps *sinkcaps = decoder->priv->input_state->caps;
1002 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
1003 gint width, height;
1004
1005 if (gst_structure_get_int (structure, "width", &width)) {
1006 for (i = 0; i < caps_size; i++) {
1007 gst_structure_set (gst_caps_get_structure (caps, i), "width",
1008 G_TYPE_INT, width, NULL);
1009 }
1010 }
1011
1012 if (gst_structure_get_int (structure, "height", &height)) {
1013 for (i = 0; i < caps_size; i++) {
1014 gst_structure_set (gst_caps_get_structure (caps, i), "height",
1015 G_TYPE_INT, height, NULL);
1016 }
1017 }
1018 }
1019
1020 for (i = 0; i < caps_size; i++) {
1021 structure = gst_caps_get_structure (caps, i);
1022 /* Random I420 1280x720 for fixation */
1023 if (gst_structure_has_field (structure, "format"))
1024 gst_structure_fixate_field_string (structure, "format", "I420");
1025 else
1026 gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
1027
1028 if (gst_structure_has_field (structure, "width"))
1029 gst_structure_fixate_field_nearest_int (structure, "width", 1280);
1030 else
1031 gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
1032
1033 if (gst_structure_has_field (structure, "height"))
1034 gst_structure_fixate_field_nearest_int (structure, "height", 720);
1035 else
1036 gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
1037 }
1038 caps = gst_caps_fixate (caps);
1039
1040 if (!caps || !gst_video_info_from_caps (&info, caps))
1041 goto caps_error;
1042
1043 GST_INFO_OBJECT (decoder,
1044 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
1045 state =
1046 gst_video_decoder_set_output_state (decoder, info.finfo->format,
1047 info.width, info.height, decoder->priv->input_state);
1048 gst_video_codec_state_unref (state);
1049 gst_caps_unref (caps);
1050
1051 return TRUE;
1052
1053 caps_error:
1054 {
1055 if (caps)
1056 gst_caps_unref (caps);
1057 return FALSE;
1058 }
1059 }
1060
/* Default sink pad event handler.
 *
 * Serialized events that must not overtake not-yet-pushed decoded frames
 * are queued on priv->current_frame_events and sent later, in order, with
 * the output. Events for which everything is known to be drained (the
 * forward_immediate cases below) and all non-serialized events are pushed
 * downstream right away.
 *
 * Returns TRUE if the event was handled/forwarded successfully. */
static gboolean
gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderPrivate *priv;
  gboolean ret = FALSE;
  /* set TRUE by a case below when ordering is known safe and the event
   * should bypass the pending-event queue */
  gboolean forward_immediate = FALSE;

  priv = decoder->priv;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_STREAM_START:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      /* a new stream is starting: push out whatever is still pending */
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
      /* Flush upstream tags after a STREAM_START */
      if (priv->upstream_tags) {
        gst_tag_list_unref (priv->upstream_tags);
        priv->upstream_tags = NULL;
        priv->tags_changed = TRUE;
      }
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Forward STREAM_START immediately. Everything is drained after
       * the STREAM_START event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      ret = gst_video_decoder_setcaps (decoder, caps);
      /* event fully consumed here; output caps are sent separately once
       * the output state is negotiated */
      gst_event_unref (event);
      event = NULL;
      break;
    }
    case GST_EVENT_SEGMENT_DONE:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Forward SEGMENT_DONE immediately. This is required
       * because no buffer or serialized event might come
       * after SEGMENT_DONE and nothing could trigger another
       * _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_EOS:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Error out even if EOS was ok when we had input, but no output */
      if (ret && priv->had_input_data && !priv->had_output_data) {
        GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("No valid frames decoded before end of stream"),
            ("no valid frames found"));
      }

      /* Forward EOS immediately. This is required because no
       * buffer or serialized event will come after EOS and
       * nothing could trigger another _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_GAP:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;
      gboolean needs_reconfigure = FALSE;
      GList *events;
      GList *frame_events;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* in keyframe-only trick mode, drain: the gap likely skips over
       * non-keyframe data and nothing else will flush pending frames */
      if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
        flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      /* Ensure we have caps before forwarding the event */
      if (!decoder->priv->output_state) {
        if (!gst_video_decoder_negotiate_default_caps (decoder)) {
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
              ("Decoder output not negotiated before GAP event."));
          forward_immediate = TRUE;
          break;
        }
        needs_reconfigure = TRUE;
      }

      needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
          || needs_reconfigure;
      if (decoder->priv->output_state_changed || needs_reconfigure) {
        if (!gst_video_decoder_negotiate_unlocked (decoder)) {
          GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
          gst_pad_mark_reconfigure (decoder->srcpad);
        }
      }

      GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
          " before the gap");
      events = decoder->priv->pending_events;
      frame_events = decoder->priv->current_frame_events;
      decoder->priv->pending_events = NULL;
      decoder->priv->current_frame_events = NULL;

      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      gst_video_decoder_push_event_list (decoder, events);
      gst_video_decoder_push_event_list (decoder, frame_events);

      /* Forward GAP immediately. Everything is drained after
       * the GAP event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      gboolean in_still;
      GstFlowReturn flow_ret = GST_FLOW_OK;

      /* only still-frame custom events are handled; others fall through to
       * the generic forwarding below */
      if (gst_video_event_parse_still_frame (event, &in_still)) {
        if (in_still) {
          GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
          GST_VIDEO_DECODER_STREAM_LOCK (decoder);
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          ret = (flow_ret == GST_FLOW_OK);
        }
        /* Forward STILL_FRAME immediately. Everything is drained after
         * the STILL_FRAME event and we can forward this event immediately
         * now without having buffers out of order.
         */
        forward_immediate = TRUE;
      }
      break;
    }
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      if (segment.format == GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder,
            "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
      } else {
        gint64 start;

        GST_DEBUG_OBJECT (decoder,
            "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);

        /* handle newsegment as a result from our legacy simple seeking */
        /* note that initial 0 should convert to 0 in any case */
        if (priv->do_estimate_rate &&
            gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
                segment.start, GST_FORMAT_TIME, &start)) {
          /* best attempt convert */
          /* as these are only estimates, stop is kept open-ended to avoid
           * premature cutting */
          GST_DEBUG_OBJECT (decoder,
              "converted to TIME start %" GST_TIME_FORMAT,
              GST_TIME_ARGS (start));
          segment.start = start;
          segment.stop = GST_CLOCK_TIME_NONE;
          segment.time = start;
          /* replace event */
          gst_event_unref (event);
          event = gst_event_new_segment (&segment);
        } else {
          goto newseg_wrong_format;
        }
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);

      /* a new segment invalidates the interpolation bases */
      priv->base_timestamp = GST_CLOCK_TIME_NONE;
      priv->base_picture_number = 0;

      decoder->input_segment = segment;
      decoder->priv->in_out_segment_sync = FALSE;

      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
    {
      GList *l;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* preserve sticky events attached to queued frames before throwing
       * the frames away */
      for (l = priv->frames; l; l = l->next) {
        GstVideoCodecFrame *frame = l->data;

        frame->events = _flush_events (decoder->srcpad, frame->events);
      }
      priv->current_frame_events = _flush_events (decoder->srcpad,
          decoder->priv->current_frame_events);

      /* well, this is kind of worse than a DISCONT */
      gst_video_decoder_flush (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      /* Forward FLUSH_STOP immediately. This is required because it is
       * expected to be forwarded immediately and no buffers are queued
       * anyway.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_TAG:
    {
      GstTagList *tags;

      gst_event_parse_tag (event, &tags);

      /* stream-scoped tags are merged with decoder tags into a new event;
       * global-scoped tags are forwarded untouched below */
      if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
        GST_VIDEO_DECODER_STREAM_LOCK (decoder);
        if (priv->upstream_tags != tags) {
          if (priv->upstream_tags)
            gst_tag_list_unref (priv->upstream_tags);
          priv->upstream_tags = gst_tag_list_ref (tags);
          GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
        }
        gst_event_unref (event);
        event = gst_video_decoder_create_merged_tags_event (decoder);
        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
        /* no merged event to push: nothing more to do, report success */
        if (!event)
          ret = TRUE;
      }
      break;
    }
    default:
      break;
  }

  /* Forward non-serialized events immediately, and all other
   * events which can be forwarded immediately without potentially
   * causing the event to go out of order with other events and
   * buffers as decided above.
   */
  if (event) {
    if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
      ret = gst_video_decoder_push_event (decoder, event);
    } else {
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->priv->current_frame_events =
          g_list_prepend (decoder->priv->current_frame_events, event);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = TRUE;
    }
  }

  return ret;

newseg_wrong_format:
  {
    GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
    gst_event_unref (event);
    /* SWALLOW EVENT */
    return TRUE;
  }
}
1351
1352 static gboolean
gst_video_decoder_sink_event(GstPad * pad,GstObject * parent,GstEvent * event)1353 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1354 GstEvent * event)
1355 {
1356 GstVideoDecoder *decoder;
1357 GstVideoDecoderClass *decoder_class;
1358 gboolean ret = FALSE;
1359
1360 decoder = GST_VIDEO_DECODER (parent);
1361 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1362
1363 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1364 GST_EVENT_TYPE_NAME (event));
1365
1366 if (decoder_class->sink_event)
1367 ret = decoder_class->sink_event (decoder, event);
1368
1369 return ret;
1370 }
1371
1372 /* perform upstream byte <-> time conversion (duration, seeking)
1373 * if subclass allows and if enough data for moderately decent conversion */
1374 static inline gboolean
gst_video_decoder_do_byte(GstVideoDecoder * dec)1375 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1376 {
1377 gboolean ret;
1378
1379 GST_OBJECT_LOCK (dec);
1380 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1381 && (dec->priv->time > GST_SECOND);
1382 GST_OBJECT_UNLOCK (dec);
1383
1384 return ret;
1385 }
1386
1387 static gboolean
gst_video_decoder_do_seek(GstVideoDecoder * dec,GstEvent * event)1388 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1389 {
1390 GstFormat format;
1391 GstSeekFlags flags;
1392 GstSeekType start_type, end_type;
1393 gdouble rate;
1394 gint64 start, start_time, end_time;
1395 GstSegment seek_segment;
1396 guint32 seqnum;
1397
1398 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1399 &start_time, &end_type, &end_time);
1400
1401 /* we'll handle plain open-ended flushing seeks with the simple approach */
1402 if (rate != 1.0) {
1403 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1404 return FALSE;
1405 }
1406
1407 if (start_type != GST_SEEK_TYPE_SET) {
1408 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1409 return FALSE;
1410 }
1411
1412 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1413 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1414 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1415 return FALSE;
1416 }
1417
1418 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1419 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1420 return FALSE;
1421 }
1422
1423 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1424 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1425 start_time, end_type, end_time, NULL);
1426 start_time = seek_segment.position;
1427
1428 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1429 GST_FORMAT_BYTES, &start)) {
1430 GST_DEBUG_OBJECT (dec, "conversion failed");
1431 return FALSE;
1432 }
1433
1434 seqnum = gst_event_get_seqnum (event);
1435 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1436 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1437 gst_event_set_seqnum (event, seqnum);
1438
1439 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1440 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1441
1442 return gst_pad_push_event (dec->sinkpad, event);
1443 }
1444
1445 static gboolean
gst_video_decoder_src_event_default(GstVideoDecoder * decoder,GstEvent * event)1446 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1447 GstEvent * event)
1448 {
1449 GstVideoDecoderPrivate *priv;
1450 gboolean res = FALSE;
1451
1452 priv = decoder->priv;
1453
1454 GST_DEBUG_OBJECT (decoder,
1455 "received event %d, %s", GST_EVENT_TYPE (event),
1456 GST_EVENT_TYPE_NAME (event));
1457
1458 switch (GST_EVENT_TYPE (event)) {
1459 case GST_EVENT_SEEK:
1460 {
1461 GstFormat format;
1462 gdouble rate;
1463 GstSeekFlags flags;
1464 GstSeekType start_type, stop_type;
1465 gint64 start, stop;
1466 gint64 tstart, tstop;
1467 guint32 seqnum;
1468
1469 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1470 &stop_type, &stop);
1471 seqnum = gst_event_get_seqnum (event);
1472
1473 /* upstream gets a chance first */
1474 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1475 break;
1476
1477 /* if upstream fails for a time seek, maybe we can help if allowed */
1478 if (format == GST_FORMAT_TIME) {
1479 if (gst_video_decoder_do_byte (decoder))
1480 res = gst_video_decoder_do_seek (decoder, event);
1481 break;
1482 }
1483
1484 /* ... though a non-time seek can be aided as well */
1485 /* First bring the requested format to time */
1486 if (!(res =
1487 gst_pad_query_convert (decoder->srcpad, format, start,
1488 GST_FORMAT_TIME, &tstart)))
1489 goto convert_error;
1490 if (!(res =
1491 gst_pad_query_convert (decoder->srcpad, format, stop,
1492 GST_FORMAT_TIME, &tstop)))
1493 goto convert_error;
1494
1495 /* then seek with time on the peer */
1496 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1497 flags, start_type, tstart, stop_type, tstop);
1498 gst_event_set_seqnum (event, seqnum);
1499
1500 res = gst_pad_push_event (decoder->sinkpad, event);
1501 break;
1502 }
1503 case GST_EVENT_QOS:
1504 {
1505 GstQOSType type;
1506 gdouble proportion;
1507 GstClockTimeDiff diff;
1508 GstClockTime timestamp;
1509
1510 gst_event_parse_qos (event, &type, &proportion, &diff, ×tamp);
1511
1512 GST_OBJECT_LOCK (decoder);
1513 priv->proportion = proportion;
1514 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1515 if (G_UNLIKELY (diff > 0)) {
1516 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1517 } else {
1518 priv->earliest_time = timestamp + diff;
1519 }
1520 } else {
1521 priv->earliest_time = GST_CLOCK_TIME_NONE;
1522 }
1523 GST_OBJECT_UNLOCK (decoder);
1524
1525 GST_DEBUG_OBJECT (decoder,
1526 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1527 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1528
1529 res = gst_pad_push_event (decoder->sinkpad, event);
1530 break;
1531 }
1532 default:
1533 res = gst_pad_push_event (decoder->sinkpad, event);
1534 break;
1535 }
1536 done:
1537 return res;
1538
1539 convert_error:
1540 GST_DEBUG_OBJECT (decoder, "could not convert format");
1541 goto done;
1542 }
1543
1544 static gboolean
gst_video_decoder_src_event(GstPad * pad,GstObject * parent,GstEvent * event)1545 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1546 {
1547 GstVideoDecoder *decoder;
1548 GstVideoDecoderClass *decoder_class;
1549 gboolean ret = FALSE;
1550
1551 decoder = GST_VIDEO_DECODER (parent);
1552 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1553
1554 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1555 GST_EVENT_TYPE_NAME (event));
1556
1557 if (decoder_class->src_event)
1558 ret = decoder_class->src_event (decoder, event);
1559
1560 return ret;
1561 }
1562
/* Default src pad query handler.
 *
 * POSITION/DURATION give upstream the first chance and fall back to
 * time-based answers derived from decoder state; CONVERT uses the
 * negotiated raw-video output state; LATENCY adds the decoder's own
 * configured latency to the peer's answer.
 *
 * Returns TRUE if the query was answered. */
static gboolean
gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
  gboolean res = TRUE;

  GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time, value;

      /* upstream gets a chance first */
      if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
        GST_LOG_OBJECT (dec, "returning peer response");
        break;
      }

      /* Refuse BYTES format queries. If it made sense to
       * answer them, upstream would have already */
      gst_query_parse_position (query, &format, NULL);

      if (format == GST_FORMAT_BYTES) {
        GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
        break;
      }

      /* we start from the last seen time */
      time = dec->priv->last_timestamp_out;
      /* correct for the segment values */
      time = gst_segment_to_stream_time (&dec->output_segment,
          GST_FORMAT_TIME, time);

      GST_LOG_OBJECT (dec,
          "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));

      /* and convert to the final format */
      if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
                  format, &value)))
        break;

      gst_query_set_position (query, format, value);

      GST_LOG_OBJECT (dec,
          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
          format);
      break;
    }
    case GST_QUERY_DURATION:
    {
      GstFormat format;

      /* upstream in any case */
      if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
        break;

      gst_query_parse_duration (query, &format, NULL);
      /* try answering TIME by converting from BYTE if subclass allows */
      if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
        gint64 value;

        if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
                &value)) {
          GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
          if (gst_pad_query_convert (dec->sinkpad,
                  GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
            gst_query_set_duration (query, GST_FORMAT_TIME, value);
            res = TRUE;
          }
        }
      }
      break;
    }
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_DEBUG_OBJECT (dec, "convert query");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      /* conversion needs the negotiated output video info, guarded by the
       * object lock */
      GST_OBJECT_LOCK (dec);
      if (dec->priv->output_state != NULL)
        res = __gst_video_rawvideo_convert (dec->priv->output_state,
            src_fmt, src_val, &dest_fmt, &dest_val);
      else
        res = FALSE;
      GST_OBJECT_UNLOCK (dec);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_LATENCY:
    {
      gboolean live;
      GstClockTime min_latency, max_latency;

      res = gst_pad_peer_query (dec->sinkpad, query);
      if (res) {
        gst_query_parse_latency (query, &live, &min_latency, &max_latency);
        GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
            GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

        /* add our own latency on top of the peer's; NONE (unbounded)
         * stays NONE */
        GST_OBJECT_LOCK (dec);
        min_latency += dec->priv->min_latency;
        if (max_latency == GST_CLOCK_TIME_NONE
            || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
          max_latency = GST_CLOCK_TIME_NONE;
        else
          max_latency += dec->priv->max_latency;
        GST_OBJECT_UNLOCK (dec);

        gst_query_set_latency (query, live, min_latency, max_latency);
      }
    }
      break;
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
  }
  return res;

error:
  GST_ERROR_OBJECT (dec, "query failed");
  return res;
}
1692
1693 static gboolean
gst_video_decoder_src_query(GstPad * pad,GstObject * parent,GstQuery * query)1694 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
1695 {
1696 GstVideoDecoder *decoder;
1697 GstVideoDecoderClass *decoder_class;
1698 gboolean ret = FALSE;
1699
1700 decoder = GST_VIDEO_DECODER (parent);
1701 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1702
1703 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1704 GST_QUERY_TYPE_NAME (query));
1705
1706 if (decoder_class->src_query)
1707 ret = decoder_class->src_query (decoder, query);
1708
1709 return ret;
1710 }
1711
1712 /**
1713 * gst_video_decoder_proxy_getcaps:
1714 * @decoder: a #GstVideoDecoder
1715 * @caps: (allow-none): initial caps
1716 * @filter: (allow-none): filter caps
1717 *
1718 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1719 * restricted to resolution/format/... combinations supported by downstream
1720 * elements.
1721 *
1722 * Returns: (transfer full): a #GstCaps owned by caller
1723 *
1724 * Since: 1.6
1725 */
1726 GstCaps *
gst_video_decoder_proxy_getcaps(GstVideoDecoder * decoder,GstCaps * caps,GstCaps * filter)1727 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
1728 GstCaps * filter)
1729 {
1730 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
1731 GST_VIDEO_DECODER_SINK_PAD (decoder),
1732 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
1733 }
1734
1735 static GstCaps *
gst_video_decoder_sink_getcaps(GstVideoDecoder * decoder,GstCaps * filter)1736 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
1737 {
1738 GstVideoDecoderClass *klass;
1739 GstCaps *caps;
1740
1741 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1742
1743 if (klass->getcaps)
1744 caps = klass->getcaps (decoder, filter);
1745 else
1746 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
1747
1748 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
1749
1750 return caps;
1751 }
1752
/* Default sink pad query handler.
 *
 * CONVERT answers byte <-> time questions from the decoder's running
 * byte/time statistics; ALLOCATION is delegated to the subclass;
 * CAPS/ACCEPT_CAPS are answered from the (possibly subclass-provided)
 * sink caps.
 *
 * Returns TRUE if the query was answered. */
static gboolean
gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
  GstVideoDecoderPrivate *priv;
  gboolean res = FALSE;

  priv = decoder->priv;

  GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      /* bytes_out/time accumulate while decoding; guarded by object lock */
      GST_OBJECT_LOCK (decoder);
      res =
          __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
          src_fmt, src_val, &dest_fmt, &dest_val);
      GST_OBJECT_UNLOCK (decoder);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_ALLOCATION:{
      GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

      /* only the subclass knows its upstream allocation requirements */
      if (klass->propose_allocation)
        res = klass->propose_allocation (decoder, query);
      break;
    }
    case GST_QUERY_CAPS:{
      GstCaps *filter, *caps;

      gst_query_parse_caps (query, &filter);
      caps = gst_video_decoder_sink_getcaps (decoder, filter);
      gst_query_set_caps_result (query, caps);
      gst_caps_unref (caps);
      res = TRUE;
      break;
    }
    case GST_QUERY_ACCEPT_CAPS:{
      if (decoder->priv->use_default_pad_acceptcaps) {
        /* subclass opted into GstPad's stock accept-caps behaviour */
        res =
            gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
            GST_OBJECT_CAST (decoder), query);
      } else {
        GstCaps *caps;
        GstCaps *allowed_caps;
        GstCaps *template_caps;
        gboolean accept;

        gst_query_parse_accept_caps (query, &caps);

        /* cheap template-subset check first, full caps query only when
         * that passes */
        template_caps = gst_pad_get_pad_template_caps (pad);
        accept = gst_caps_is_subset (caps, template_caps);
        gst_caps_unref (template_caps);

        if (accept) {
          allowed_caps =
              gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);

          accept = gst_caps_can_intersect (caps, allowed_caps);

          gst_caps_unref (allowed_caps);
        }

        gst_query_set_accept_caps_result (query, accept);
        res = TRUE;
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
      break;
  }
done:

  return res;
error:
  GST_DEBUG_OBJECT (decoder, "query failed");
  goto done;

}
1842
1843 static gboolean
gst_video_decoder_sink_query(GstPad * pad,GstObject * parent,GstQuery * query)1844 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
1845 GstQuery * query)
1846 {
1847 GstVideoDecoder *decoder;
1848 GstVideoDecoderClass *decoder_class;
1849 gboolean ret = FALSE;
1850
1851 decoder = GST_VIDEO_DECODER (parent);
1852 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1853
1854 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1855 GST_QUERY_TYPE_NAME (query));
1856
1857 if (decoder_class->sink_query)
1858 ret = decoder_class->sink_query (decoder, query);
1859
1860 return ret;
1861 }
1862
/* Bookkeeping record associating an input buffer's metadata with the input
 * byte offset at which it arrived, so timestamps can be matched back to
 * parsed frames later. */
typedef struct _Timestamp Timestamp;
struct _Timestamp
{
  guint64 offset;               /* input byte offset of the buffer */
  GstClockTime pts;             /* presentation timestamp of the buffer */
  GstClockTime dts;             /* decode timestamp of the buffer */
  GstClockTime duration;        /* duration of the buffer */
  guint flags;                  /* GstBufferFlags of the buffer */
};
1872
/* Release a Timestamp record allocated with g_slice_new() */
static void
timestamp_free (Timestamp * ts)
{
  g_slice_free (Timestamp, ts);
}
1878
1879 static void
gst_video_decoder_add_buffer_info(GstVideoDecoder * decoder,GstBuffer * buffer)1880 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
1881 GstBuffer * buffer)
1882 {
1883 GstVideoDecoderPrivate *priv = decoder->priv;
1884 Timestamp *ts;
1885
1886 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
1887 !GST_BUFFER_DTS_IS_VALID (buffer) &&
1888 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
1889 GST_BUFFER_FLAGS (buffer) == 0) {
1890 /* Save memory - don't bother storing info
1891 * for buffers with no distinguishing info */
1892 return;
1893 }
1894
1895 ts = g_slice_new (Timestamp);
1896
1897 GST_LOG_OBJECT (decoder,
1898 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
1899 " (offset:%" G_GUINT64_FORMAT ")",
1900 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
1901 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
1902
1903 ts->offset = priv->input_offset;
1904 ts->pts = GST_BUFFER_PTS (buffer);
1905 ts->dts = GST_BUFFER_DTS (buffer);
1906 ts->duration = GST_BUFFER_DURATION (buffer);
1907 ts->flags = GST_BUFFER_FLAGS (buffer);
1908
1909 priv->timestamps = g_list_append (priv->timestamps, ts);
1910 }
1911
1912 static void
gst_video_decoder_get_buffer_info_at_offset(GstVideoDecoder * decoder,guint64 offset,GstClockTime * pts,GstClockTime * dts,GstClockTime * duration,guint * flags)1913 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
1914 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
1915 GstClockTime * duration, guint * flags)
1916 {
1917 #ifndef GST_DISABLE_GST_DEBUG
1918 guint64 got_offset = 0;
1919 #endif
1920 Timestamp *ts;
1921 GList *g;
1922
1923 *pts = GST_CLOCK_TIME_NONE;
1924 *dts = GST_CLOCK_TIME_NONE;
1925 *duration = GST_CLOCK_TIME_NONE;
1926 *flags = 0;
1927
1928 g = decoder->priv->timestamps;
1929 while (g) {
1930 ts = g->data;
1931 if (ts->offset <= offset) {
1932 #ifndef GST_DISABLE_GST_DEBUG
1933 got_offset = ts->offset;
1934 #endif
1935 *pts = ts->pts;
1936 *dts = ts->dts;
1937 *duration = ts->duration;
1938 *flags = ts->flags;
1939 g = g->next;
1940 decoder->priv->timestamps = g_list_remove (decoder->priv->timestamps, ts);
1941 timestamp_free (ts);
1942 } else {
1943 break;
1944 }
1945 }
1946
1947 GST_LOG_OBJECT (decoder,
1948 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
1949 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
1950 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
1951 }
1952
1953 static void
gst_video_decoder_clear_queues(GstVideoDecoder * dec)1954 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
1955 {
1956 GstVideoDecoderPrivate *priv = dec->priv;
1957
1958 g_list_free_full (priv->output_queued,
1959 (GDestroyNotify) gst_mini_object_unref);
1960 priv->output_queued = NULL;
1961
1962 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
1963 priv->gather = NULL;
1964 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
1965 priv->decode = NULL;
1966 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
1967 priv->parse = NULL;
1968 g_list_free_full (priv->parse_gather,
1969 (GDestroyNotify) gst_video_codec_frame_unref);
1970 priv->parse_gather = NULL;
1971 g_list_free_full (priv->frames, (GDestroyNotify) gst_video_codec_frame_unref);
1972 priv->frames = NULL;
1973 }
1974
/* Reset decoder state while holding the stream lock.
 *
 * @full: TRUE when stopping/(re)starting the element; additionally drops
 *     the input/output codec state, tags, QoS frame duration, statistics,
 *     buffer pool and allocator.
 * @flush_hard: TRUE for a hard flush (e.g. flushing seek); drops queued
 *     data, segments, the current frame and pending events, and resets
 *     error counters and QoS values.
 *
 * Parsing/timestamp bookkeeping (adapters, input offsets, stored
 * Timestamp records) is cleared unconditionally. */
static void
gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard)
{
  GstVideoDecoderPrivate *priv = decoder->priv;

  GST_DEBUG_OBJECT (decoder, "reset full %d", full);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (full || flush_hard) {
    gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
    gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
    gst_video_decoder_clear_queues (decoder);
    decoder->priv->in_out_segment_sync = TRUE;

    if (priv->current_frame) {
      gst_video_codec_frame_unref (priv->current_frame);
      priv->current_frame = NULL;
    }

    /* drop events not yet attached to a frame and events waiting to be
     * pushed before the next output frame */
    g_list_free_full (priv->current_frame_events,
        (GDestroyNotify) gst_event_unref);
    priv->current_frame_events = NULL;
    g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
    priv->pending_events = NULL;

    priv->error_count = 0;
    priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
    priv->had_output_data = FALSE;
    priv->had_input_data = FALSE;

    /* QoS values are read elsewhere under the object lock (e.g. when
     * posting QoS messages in drop_frame) */
    GST_OBJECT_LOCK (decoder);
    priv->earliest_time = GST_CLOCK_TIME_NONE;
    priv->proportion = 0.5;
    GST_OBJECT_UNLOCK (decoder);
  }

  if (full) {
    if (priv->input_state)
      gst_video_codec_state_unref (priv->input_state);
    priv->input_state = NULL;
    /* output state and qos_frame_duration are shared via the object lock */
    GST_OBJECT_LOCK (decoder);
    if (priv->output_state)
      gst_video_codec_state_unref (priv->output_state);
    priv->output_state = NULL;

    priv->qos_frame_duration = 0;
    GST_OBJECT_UNLOCK (decoder);

    if (priv->tags)
      gst_tag_list_unref (priv->tags);
    priv->tags = NULL;
    priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    if (priv->upstream_tags) {
      gst_tag_list_unref (priv->upstream_tags);
      priv->upstream_tags = NULL;
    }
    priv->tags_changed = FALSE;
    priv->reordered_output = FALSE;

    priv->dropped = 0;
    priv->processed = 0;

    priv->decode_frame_number = 0;
    priv->base_picture_number = 0;

    /* release any negotiated allocation resources */
    if (priv->pool) {
      GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
          priv->pool);
      gst_buffer_pool_set_active (priv->pool, FALSE);
      gst_object_unref (priv->pool);
      priv->pool = NULL;
    }

    if (priv->allocator) {
      gst_object_unref (priv->allocator);
      priv->allocator = NULL;
    }
  }

  priv->discont = TRUE;

  /* timestamp tracking restarts from scratch */
  priv->base_timestamp = GST_CLOCK_TIME_NONE;
  priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
  priv->pts_delta = GST_CLOCK_TIME_NONE;

  priv->input_offset = 0;
  priv->frame_offset = 0;
  gst_adapter_clear (priv->input_adapter);
  gst_adapter_clear (priv->output_adapter);
  g_list_free_full (priv->timestamps, (GDestroyNotify) timestamp_free);
  priv->timestamps = NULL;

  GST_OBJECT_LOCK (decoder);
  priv->bytes_out = 0;
  priv->time = 0;
  GST_OBJECT_UNLOCK (decoder);

#ifndef GST_DISABLE_DEBUG
  priv->last_reset_time = gst_util_get_timestamp ();
#endif

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
2080
/* Forward-direction input processing: hand one input buffer to the
 * decoder subclass.
 *
 * Packetized streams: each buffer is one complete frame; it becomes the
 * current frame's input buffer and is decoded immediately, or — when the
 * input segment rate is negative — prepended to the parse_gather list for
 * later reverse decoding.  Non-packetized streams: the buffer is pushed
 * into the input adapter together with its timestamp info, and
 * gst_video_decoder_parse_available() extracts frames from the adapter;
 * @at_eos allows the parsing stage to drain remaining data.
 *
 * Returns a flow return, with NEED_DATA mapped to GST_FLOW_OK. */
static GstFlowReturn
gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
    GstBuffer * buf, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv;
  GstVideoDecoderClass *klass;
  GstFlowReturn ret = GST_FLOW_OK;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  priv = decoder->priv;

  /* non-packetized input requires a subclass parse implementation */
  g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);

  /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
   * and this function would only be called to get everything collected GOP
   * by GOP in the parse_gather list */
  if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
      && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
    ret = gst_video_decoder_drain_out (decoder, FALSE);

  if (priv->current_frame == NULL)
    priv->current_frame = gst_video_decoder_new_frame (decoder);

  /* remember this buffer's timestamps keyed by byte offset so they can
   * be matched back to parsed frames later */
  if (!priv->packetized)
    gst_video_decoder_add_buffer_info (decoder, buf);

  priv->input_offset += gst_buffer_get_size (buf);

  if (priv->packetized) {
    gboolean was_keyframe = FALSE;
    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
      was_keyframe = TRUE;
      GST_LOG_OBJECT (decoder, "Marking current_frame as sync point");
      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
    }

    priv->current_frame->input_buffer = buf;

    if (decoder->input_segment.rate < 0.0) {
      priv->parse_gather =
          g_list_prepend (priv->parse_gather, priv->current_frame);
    } else {
      ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
    }
    priv->current_frame = NULL;
    /* If in trick mode and it was a keyframe, drain decoder to avoid extra
     * latency. Only do this for forwards playback as reverse playback handles
     * draining on keyframes in flush_parse(), and would otherwise call back
     * from drain_out() to here causing an infinite loop.
     * Also this function is only called for reverse playback to gather frames
     * GOP by GOP, and does not do any actual decoding. That would be done by
     * flush_decode() */
    if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
        && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
      ret = gst_video_decoder_drain_out (decoder, FALSE);
  } else {
    gst_adapter_push (priv->input_adapter, buf);

    ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
  }

  /* NEED_DATA just means the parser wants more input; not an error */
  if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
    return GST_FLOW_OK;

  return ret;
}
2147
2148 static GstFlowReturn
gst_video_decoder_flush_decode(GstVideoDecoder * dec)2149 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2150 {
2151 GstVideoDecoderPrivate *priv = dec->priv;
2152 GstFlowReturn res = GST_FLOW_OK;
2153 GList *walk;
2154
2155 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2156
2157 walk = priv->decode;
2158 while (walk) {
2159 GList *next;
2160 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2161
2162 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2163 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2164 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2165 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2166
2167 next = walk->next;
2168
2169 priv->decode = g_list_delete_link (priv->decode, walk);
2170
2171 /* decode buffer, resulting data prepended to queue */
2172 res = gst_video_decoder_decode_frame (dec, frame);
2173 if (res != GST_FLOW_OK)
2174 break;
2175
2176 walk = next;
2177 }
2178
2179 return res;
2180 }
2181
/* gst_video_decoder_flush_parse is called from the
 * chain_reverse() function when a buffer containing
 * a DISCONT - indicating that reverse playback
 * looped back to the next data block, and therefore
 * all available data should be fed through the
 * decoder and frames gathered for reversed output
 */
static GstFlowReturn
gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn res = GST_FLOW_OK;
  GList *walk;
  GstVideoDecoderClass *decoder_class;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);

  GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");

  /* Reverse the gather list, and prepend it to the parse list,
   * then flush to parse whatever we can */
  priv->gather = g_list_reverse (priv->gather);
  priv->parse = g_list_concat (priv->gather, priv->parse);
  priv->gather = NULL;

  /* clear buffer and decoder state */
  gst_video_decoder_flush (dec, FALSE);

  walk = priv->parse;
  while (walk) {
    GstBuffer *buf = GST_BUFFER_CAST (walk->data);
    GList *next = walk->next;

    GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
        ", DTS %" GST_TIME_FORMAT " flags %x", buf,
        GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
        GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));

    /* parse buffer, resulting frames prepended to parse_gather queue */
    gst_buffer_ref (buf);
    res = gst_video_decoder_chain_forward (dec, buf, at_eos);

    /* if we generated output, we can discard the buffer, else we
     * keep it in the queue */
    if (priv->parse_gather) {
      GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
      priv->parse = g_list_delete_link (priv->parse, walk);
      gst_buffer_unref (buf);
    } else {
      GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
    }
    walk = next;
  }

  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
    GList *walk2;

    /* this is reverse playback, check if we need to apply some segment
     * to the output before decoding, as during decoding the segment.rate
     * must be used to determine if a buffer should be pushed or added to
     * the output list for reverse pushing.
     *
     * The new segment is not immediately pushed here because we must
     * wait for negotiation to happen before it can be pushed to avoid
     * pushing a segment before caps event. Negotiation only happens
     * when finish_frame is called.
     */
    for (walk2 = frame->events; walk2;) {
      GList *cur = walk2;
      GstEvent *event = walk2->data;

      walk2 = g_list_next (walk2);
      /* move the SEGMENT event and anything that sorts before it off the
       * frame onto the pending list, remembering a TIME segment as the
       * new output segment */
      if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {

        if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
          GstSegment segment;

          GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
              frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
          gst_event_copy_segment (event, &segment);
          if (segment.format == GST_FORMAT_TIME) {
            dec->output_segment = segment;
            dec->priv->in_out_segment_sync =
                gst_segment_is_equal (&dec->input_segment, &segment);
          }
        }
        dec->priv->pending_events =
            g_list_append (dec->priv->pending_events, event);
        frame->events = g_list_delete_link (frame->events, cur);
      }
    }

    walk = walk->next;
  }

  /* now we can process frames. Start by moving each frame from the parse_gather
   * to the decode list, reverse the order as we go, and stopping when/if we
   * copy a keyframe. */
  GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);

    /* remove from the gather list */
    priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);

    /* move it to the front of the decode queue */
    priv->decode = g_list_concat (walk, priv->decode);

    /* if we copied a keyframe, flush and decode the decode queue */
    if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
      GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
          ", DTS %" GST_TIME_FORMAT, frame,
          GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
          GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
      res = gst_video_decoder_flush_decode (dec);
      if (res != GST_FLOW_OK)
        goto done;

      /* We need to tell the subclass to drain now.
       * We prefer the drain vfunc, but for backward-compat
       * we use a finish() vfunc if drain isn't implemented */
      if (decoder_class->drain) {
        GST_DEBUG_OBJECT (dec, "Draining");
        res = decoder_class->drain (dec);
      } else if (decoder_class->finish) {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
            "Calling finish() for backwards-compat");
        res = decoder_class->finish (dec);
      }

      if (res != GST_FLOW_OK)
        goto done;

      /* now send queued data downstream */
      walk = priv->output_queued;
      while (walk) {
        GstBuffer *buf = GST_BUFFER_CAST (walk->data);

        priv->output_queued =
            g_list_delete_link (priv->output_queued, priv->output_queued);

        if (G_LIKELY (res == GST_FLOW_OK)) {
          /* avoid stray DISCONT from forward processing,
           * which have no meaning in reverse pushing */
          GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);

          /* Last chance to calculate a timestamp as we loop backwards
           * through the list */
          if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
          else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
              GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
            GST_BUFFER_TIMESTAMP (buf) =
                priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
            GST_LOG_OBJECT (dec,
                "Calculated TS %" GST_TIME_FORMAT " working backwards",
                GST_TIME_ARGS (priv->last_timestamp_out));
          }

          res = gst_video_decoder_clip_and_push_buf (dec, buf);
        } else {
          /* once a push failed, drop (unref) the remaining buffers */
          gst_buffer_unref (buf);
        }

        walk = priv->output_queued;
      }

      /* clear buffer and decoder state again
       * before moving to the previous keyframe */
      gst_video_decoder_flush (dec, FALSE);
    }

    walk = priv->parse_gather;
  }

done:
  return res;
}
2364
2365 static GstFlowReturn
gst_video_decoder_chain_reverse(GstVideoDecoder * dec,GstBuffer * buf)2366 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2367 {
2368 GstVideoDecoderPrivate *priv = dec->priv;
2369 GstFlowReturn result = GST_FLOW_OK;
2370
2371 /* if we have a discont, move buffers to the decode list */
2372 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2373 GST_DEBUG_OBJECT (dec, "received discont");
2374
2375 /* parse and decode stuff in the gather and parse queues */
2376 result = gst_video_decoder_flush_parse (dec, FALSE);
2377 }
2378
2379 if (G_LIKELY (buf)) {
2380 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2381 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2382 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2383 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2384 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2385 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2386
2387 /* add buffer to gather queue */
2388 priv->gather = g_list_prepend (priv->gather, buf);
2389 }
2390
2391 return result;
2392 }
2393
/* Sink pad chain function.
 *
 * Errors out with NOT_NEGOTIATED when the subclass requires input caps
 * (needs_format) and none were set yet.  If upstream never sent a segment,
 * a default open TIME segment is fabricated and queued as a frame event.
 * Dispatches to forward or reverse processing depending on the input
 * segment rate.  Takes the stream lock for the duration of processing. */
static GstFlowReturn
gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstVideoDecoder *decoder;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder = GST_VIDEO_DECODER (parent);

  if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
    goto not_negotiated;

  GST_LOG_OBJECT (decoder,
      "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
      GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
      gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* NOTE:
   * requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */

  if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
    GstEvent *event;
    GstSegment *segment = &decoder->input_segment;

    GST_WARNING_OBJECT (decoder,
        "Received buffer without a new-segment. "
        "Assuming timestamps start from 0.");

    gst_segment_init (segment, GST_FORMAT_TIME);

    event = gst_event_new_segment (segment);

    /* queue the fabricated segment event so it is pushed with the next
     * frame, like a real upstream segment would be */
    decoder->priv->current_frame_events =
        g_list_prepend (decoder->priv->current_frame_events, event);
  }

  decoder->priv->had_input_data = TRUE;

  if (decoder->input_segment.rate > 0.0)
    ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
  else
    ret = gst_video_decoder_chain_reverse (decoder, buf);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
        ("decoder not initialized"));
    gst_buffer_unref (buf);
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
2454
/* GstElement::change_state implementation.
 *
 * Going up: calls the subclass open() vfunc on NULL->READY and, after a
 * full state reset, the start() vfunc on READY->PAUSED.  Going down:
 * calls stop() and resets on PAUSED->READY, and close() on READY->NULL.
 * Any vfunc returning FALSE aborts the transition with
 * GST_STATE_CHANGE_FAILURE and posts an element error. */
static GstStateChangeReturn
gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  GstStateChangeReturn ret;

  decoder = GST_VIDEO_DECODER (element);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      /* open device/library if needed */
      if (decoder_class->open && !decoder_class->open (decoder))
        goto open_failed;
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      /* start from a clean slate before the subclass starts */
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Initialize device/library if needed */
      if (decoder_class->start && !decoder_class->start (decoder))
        goto start_failed;
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:{
      gboolean stopped = TRUE;

      if (decoder_class->stop)
        stopped = decoder_class->stop (decoder);

      /* reset even if stop() failed, so the element is reusable */
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      if (!stopped)
        goto stop_failed;

      break;
    }
    case GST_STATE_CHANGE_READY_TO_NULL:
      /* close device/library if needed */
      if (decoder_class->close && !decoder_class->close (decoder))
        goto close_failed;
      break;
    default:
      break;
  }

  return ret;

  /* Errors */
open_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to open decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

start_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to start decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

stop_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to stop decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

close_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to close decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }
}
2542
2543 static GstVideoCodecFrame *
gst_video_decoder_new_frame(GstVideoDecoder * decoder)2544 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
2545 {
2546 GstVideoDecoderPrivate *priv = decoder->priv;
2547 GstVideoCodecFrame *frame;
2548
2549 frame = g_slice_new0 (GstVideoCodecFrame);
2550
2551 frame->ref_count = 1;
2552
2553 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2554 frame->system_frame_number = priv->system_frame_number;
2555 priv->system_frame_number++;
2556 frame->decode_frame_number = priv->decode_frame_number;
2557 priv->decode_frame_number++;
2558
2559 frame->dts = GST_CLOCK_TIME_NONE;
2560 frame->pts = GST_CLOCK_TIME_NONE;
2561 frame->duration = GST_CLOCK_TIME_NONE;
2562 frame->events = priv->current_frame_events;
2563 priv->current_frame_events = NULL;
2564
2565 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2566
2567 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
2568 frame, frame->system_frame_number);
2569
2570 return frame;
2571 }
2572
2573 static void
gst_video_decoder_push_event_list(GstVideoDecoder * decoder,GList * events)2574 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
2575 {
2576 GList *l;
2577
2578 /* events are stored in reverse order */
2579 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2580 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2581 gst_video_decoder_push_event (decoder, l->data);
2582 }
2583 g_list_free (events);
2584 }
2585
/* Common bookkeeping performed before a frame is finished (pushed) or
 * dropped (@dropping == TRUE):
 *
 * - pushes (or, when dropping, re-queues) all events that arrived before
 *   this frame;
 * - reconstructs a usable PTS for the frame from the available sources:
 *   subclass-set PTS, the lowest pending input DTS/PTS of older frames,
 *   or the previous output timestamp plus duration;
 * - clamps backwards timestamps and records the last output timestamp.
 *
 * NOTE(review): frame->abidata.ABI.ts appears to hold the input DTS and
 * .ts2 the input PTS — they are set outside this chunk; confirm there. */
static void
gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, gboolean dropping)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GList *l, *events = NULL;
  gboolean sync;

#ifndef GST_DISABLE_GST_DEBUG
  GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
      g_list_length (priv->frames),
      gst_adapter_available (priv->input_adapter),
      gst_adapter_available (priv->output_adapter));
#endif

  sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);

  GST_LOG_OBJECT (decoder,
      "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
      GST_TIME_FORMAT,
      frame, frame->system_frame_number,
      sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));

  /* Push all pending events that arrived before this frame */
  for (l = priv->frames; l; l = l->next) {
    GstVideoCodecFrame *tmp = l->data;

    if (tmp->events) {
      events = g_list_concat (tmp->events, events);
      tmp->events = NULL;
    }

    if (tmp == frame)
      break;
  }

  if (dropping || !decoder->priv->output_state) {
    /* Push before the next frame that is not dropped */
    decoder->priv->pending_events =
        g_list_concat (events, decoder->priv->pending_events);
  } else {
    gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
    decoder->priv->pending_events = NULL;

    gst_video_decoder_push_event_list (decoder, events);
  }

  /* Check if the data should not be displayed. For example altref/invisible
   * frame in vp8. In this case we should not update the timestamps. */
  if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
    return;

  /* If the frame is meant to be output but we don't have an output_buffer
   * we have a problem :) */
  if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
    goto no_output_buffer;

  if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
    if (frame->pts != priv->base_timestamp) {
      GST_DEBUG_OBJECT (decoder,
          "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
          GST_TIME_ARGS (frame->pts),
          GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
                  decoder->output_segment.start)));
      priv->base_timestamp = frame->pts;
      priv->base_picture_number = frame->decode_frame_number;
    }
  }

  if (frame->duration == GST_CLOCK_TIME_NONE) {
    frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
    GST_LOG_OBJECT (decoder,
        "Guessing duration %" GST_TIME_FORMAT " for frame...",
        GST_TIME_ARGS (frame->duration));
  }

  /* PTS is expected montone ascending,
   * so a good guess is lowest unsent DTS */
  {
    GstClockTime min_ts = GST_CLOCK_TIME_NONE;
    GstVideoCodecFrame *oframe = NULL;
    gboolean seen_none = FALSE;

    /* some maintenance regardless */
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
        min_ts = tmp->abidata.ABI.ts;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
    }

    /* and set if needed;
     * valid delta means we have reasonable DTS input */
    /* also, if we ended up reordered, means this approach is conflicting
     * with some sparse existing PTS, and so it does not work out */
    if (!priv->reordered_output &&
        !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
        GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
      frame->pts = min_ts + priv->pts_delta;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }

    /* some more maintenance, ts2 holds PTS */
    min_ts = GST_CLOCK_TIME_NONE;
    seen_none = FALSE;
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
        min_ts = tmp->abidata.ABI.ts2;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
    }

    /* if we detected reordered output, then PTS are void,
     * however those were obtained; bogus input, subclass etc */
    if (priv->reordered_output && !seen_none) {
      GST_DEBUG_OBJECT (decoder, "invalidating PTS");
      frame->pts = GST_CLOCK_TIME_NONE;
    }

    if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
      frame->pts = min_ts;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }
  }


  if (frame->pts == GST_CLOCK_TIME_NONE) {
    /* Last ditch timestamp guess: Just add the duration to the previous
     * frame. If it's the first frame, just use the segment start. */
    if (frame->duration != GST_CLOCK_TIME_NONE) {
      if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
        frame->pts = priv->last_timestamp_out + frame->duration;
      else if (decoder->output_segment.rate > 0.0)
        frame->pts = decoder->output_segment.start;
      GST_LOG_OBJECT (decoder,
          "Guessing timestamp %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
      frame->pts = frame->dts;
      GST_LOG_OBJECT (decoder,
          "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    }
  }

  /* decreasing PTS marks reordered output; clamp so downstream does not
   * see timestamps going backwards */
  if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
    if (frame->pts < priv->last_timestamp_out) {
      GST_WARNING_OBJECT (decoder,
          "decreasing timestamp (%" GST_TIME_FORMAT " < %"
          GST_TIME_FORMAT ")",
          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
      priv->reordered_output = TRUE;
      /* make it a bit less weird downstream */
      frame->pts = priv->last_timestamp_out;
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (frame->pts))
    priv->last_timestamp_out = frame->pts;

  return;

  /* ERRORS */
no_output_buffer:
  {
    GST_ERROR_OBJECT (decoder, "No buffer to output !");
  }
}
2780
/**
 * gst_video_decoder_release_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to release
 *
 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
 * without any processing other than removing it from list of pending frames,
 * after which it is considered finished and released.
 *
 * Since: 1.2.2
 */
void
gst_video_decoder_release_frame (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame)
{
  GList *link;

  /* unref once from the list */
  GST_VIDEO_DECODER_STREAM_LOCK (dec);
  link = g_list_find (dec->priv->frames, frame);
  if (link) {
    gst_video_codec_frame_unref (frame);
    dec->priv->frames = g_list_delete_link (dec->priv->frames, link);
  }
  /* don't discard events still attached to the frame: queue them to be
   * pushed before the next output frame instead */
  if (frame->events) {
    dec->priv->pending_events =
        g_list_concat (frame->events, dec->priv->pending_events);
    frame->events = NULL;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  /* unref because this function takes ownership */
  gst_video_codec_frame_unref (frame);
}
2815
/**
 * gst_video_decoder_drop_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to drop
 *
 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
 * case and posts a QoS message with the frame's details on the bus.
 * In any case, the frame is considered finished and released.
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 */
GstFlowReturn
gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
{
  GstClockTime stream_time, jitter, earliest_time, qostime, timestamp;
  GstSegment *segment;
  GstMessage *qos_msg;
  gdouble proportion;

  GST_LOG_OBJECT (dec, "drop frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (dec);

  /* flush events and reconstruct the frame PTS, as for a finished frame */
  gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);

  GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
      GST_TIME_ARGS (frame->pts));

  dec->priv->dropped++;

  /* post QoS message */
  /* snapshot the QoS values shared with other contexts under the
   * object lock */
  GST_OBJECT_LOCK (dec);
  proportion = dec->priv->proportion;
  earliest_time = dec->priv->earliest_time;
  GST_OBJECT_UNLOCK (dec);

  /* translate the frame PTS into stream time and running time for the
   * QoS message; fall back to the input segment when the output segment
   * has not been configured yet */
  timestamp = frame->pts;
  segment = &dec->output_segment;
  if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
    segment = &dec->input_segment;
  stream_time =
      gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
  qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
  jitter = GST_CLOCK_DIFF (qostime, earliest_time);
  qos_msg =
      gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
      timestamp, GST_CLOCK_TIME_NONE);
  gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
  gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
      dec->priv->processed, dec->priv->dropped);
  gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);

  /* now free the frame */
  gst_video_decoder_release_frame (dec, frame);

  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  return GST_FLOW_OK;
}
2875
2876 static gboolean
gst_video_decoder_transform_meta_default(GstVideoDecoder * decoder,GstVideoCodecFrame * frame,GstMeta * meta)2877 gst_video_decoder_transform_meta_default (GstVideoDecoder *
2878 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
2879 {
2880 const GstMetaInfo *info = meta->info;
2881 const gchar *const *tags;
2882
2883 tags = gst_meta_api_type_get_tags (info->api);
2884
2885 if (!tags || (g_strv_length ((gchar **) tags) == 1
2886 && gst_meta_api_type_has_tag (info->api,
2887 g_quark_from_string (GST_META_TAG_VIDEO_STR))))
2888 return TRUE;
2889
2890 return FALSE;
2891 }
2892
/* Context handed to foreach_metadata() while copying metadata from a
 * frame's input buffer to its output buffer */
typedef struct
{
  GstVideoDecoder *decoder;     /* decoder whose transform_meta vfunc decides the copy */
  GstVideoCodecFrame *frame;    /* frame whose output_buffer receives the metas */
} CopyMetaData;
2898
2899 static gboolean
foreach_metadata(GstBuffer * inbuf,GstMeta ** meta,gpointer user_data)2900 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
2901 {
2902 CopyMetaData *data = user_data;
2903 GstVideoDecoder *decoder = data->decoder;
2904 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2905 GstVideoCodecFrame *frame = data->frame;
2906 const GstMetaInfo *info = (*meta)->info;
2907 gboolean do_copy = FALSE;
2908
2909 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
2910 /* never call the transform_meta with memory specific metadata */
2911 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
2912 g_type_name (info->api));
2913 do_copy = FALSE;
2914 } else if (klass->transform_meta) {
2915 do_copy = klass->transform_meta (decoder, frame, *meta);
2916 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
2917 g_type_name (info->api), do_copy);
2918 }
2919
2920 /* we only copy metadata when the subclass implemented a transform_meta
2921 * function and when it returns %TRUE */
2922 if (do_copy && info->transform_func) {
2923 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
2924 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
2925 /* simply copy then */
2926 info->transform_func (frame->output_buffer, *meta, inbuf,
2927 _gst_meta_transform_copy, ©_data);
2928 }
2929 return TRUE;
2930 }
2931
/**
 * gst_video_decoder_finish_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer full): a decoded #GstVideoCodecFrame
 *
 * @frame should have a valid decoded data buffer, whose metadata fields
 * are then appropriately set according to frame data and pushed downstream.
 * If no output data is provided, @frame is considered skipped.
 * In any case, the frame is considered finished and released.
 *
 * After calling this function the output buffer of the frame is to be
 * considered read-only. This function will also change the metadata
 * of the buffer.
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *output_buffer;
  gboolean needs_reconfigure = FALSE;

  GST_LOG_OBJECT (decoder, "finish frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* Renegotiate first if the output state changed or downstream requested
   * reconfiguration; on failure keep the reconfigure flag set so a later
   * call retries */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
              && needs_reconfigure))) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      goto done;
    }
  }

  gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
  priv->processed++;

  /* Push any pending merged tag event before the buffer it applies to */
  if (priv->tags_changed) {
    GstEvent *tags_event;

    tags_event = gst_video_decoder_create_merged_tags_event (decoder);

    if (tags_event != NULL)
      gst_video_decoder_push_event (decoder, tags_event);

    priv->tags_changed = FALSE;
  }

  /* no buffer data means this frame is skipped */
  if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
    GST_DEBUG_OBJECT (decoder, "skipping frame %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->pts));
    goto done;
  }

  /* We need a writable buffer for the metadata changes below */
  output_buffer = frame->output_buffer =
      gst_buffer_make_writable (frame->output_buffer);

  GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);

  GST_BUFFER_PTS (output_buffer) = frame->pts;
  /* decoded raw video has no meaningful DTS or byte offsets */
  GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (output_buffer) = frame->duration;

  GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;

  if (priv->discont) {
    GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
  }

  /* Let the subclass pick which input-buffer metas carry over */
  if (decoder_class->transform_meta) {
    if (G_LIKELY (frame->input_buffer)) {
      CopyMetaData data;

      data.decoder = decoder;
      data.frame = frame;
      gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
    } else {
      GST_WARNING_OBJECT (decoder,
          "Can't copy metadata because input frame disappeared");
    }
  }

  /* Get an additional ref to the buffer, which is going to be pushed
   * downstream, the original ref is owned by the frame
   */
  output_buffer = gst_buffer_ref (output_buffer);

  /* Release frame so the buffer is writable when we push it downstream
   * if possible, i.e. if the subclass does not hold additional references
   * to the frame
   */
  gst_video_decoder_release_frame (decoder, frame);
  frame = NULL;

  /* Reverse playback (except keyframe-only trick mode): queue the buffer
   * so it can be flushed later in reverse order */
  if (decoder->output_segment.rate < 0.0
      && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
    GST_LOG_OBJECT (decoder, "queued frame");
    priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
  } else {
    ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
  }

done:
  if (frame)
    gst_video_decoder_release_frame (decoder, frame);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;
}
3052
/* With stream lock held; takes ownership of @buf.  Clips the buffer to the
 * output segment, drops it for QoS if it is already too late, and pushes it
 * downstream (temporarily releasing the stream lock for the push). */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  guint64 start, stop;
  guint64 cstart, cstop;
  GstSegment *segment;
  GstClockTime duration;

  /* Check for clipping */
  start = GST_BUFFER_PTS (buf);
  duration = GST_BUFFER_DURATION (buf);

  /* store that we have valid decoded data */
  priv->had_output_data = TRUE;

  stop = GST_CLOCK_TIME_NONE;

  if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
    stop = start + duration;
  } else if (GST_CLOCK_TIME_IS_VALID (start)
      && !GST_CLOCK_TIME_IS_VALID (duration)) {
    /* If we don't clip away buffers that far before the segment we
     * can cause the pipeline to lockup. This can happen if audio is
     * properly clipped, and thus the audio sink does not preroll yet
     * but the video sink prerolls because we already outputted a
     * buffer here... and then queues run full.
     *
     * In the worst case we will clip one buffer too many here now if no
     * framerate is given, no buffer duration is given and the actual
     * framerate is lower than 25fps */
    stop = start + 40 * GST_MSECOND;
  }

  segment = &decoder->output_segment;
  if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
    /* buffer (partially) inside the segment: adjust to the clipped range */
    GST_BUFFER_PTS (buf) = cstart;

    if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
      GST_BUFFER_DURATION (buf) = cstop - cstart;

    GST_LOG_OBJECT (decoder,
        "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
        GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cstart),
        GST_TIME_ARGS (cstop),
        GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
        GST_TIME_ARGS (segment->time));
  } else {
    GST_LOG_OBJECT (decoder,
        "dropping buffer outside segment: %" GST_TIME_FORMAT
        " %" GST_TIME_FORMAT
        " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
        GST_TIME_ARGS (segment->start),
        GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (decoder->priv->in_out_segment_sync) {
      if (segment->rate >= 0) {
        if (GST_BUFFER_PTS (buf) >= segment->stop)
          ret = GST_FLOW_EOS;
      } else if (GST_BUFFER_PTS (buf) < segment->start) {
        ret = GST_FLOW_EOS;
      }
    }
    gst_buffer_unref (buf);
    goto done;
  }

  /* Is buffer too late (QoS) ? */
  if (GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
      && GST_CLOCK_TIME_IS_VALID (cstart)) {
    GstClockTime deadline =
        gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
    if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
      GST_DEBUG_OBJECT (decoder,
          "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
          GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
          GST_TIME_ARGS (priv->earliest_time));
      gst_buffer_unref (buf);
      /* next pushed buffer should carry DISCONT after this drop */
      priv->discont = TRUE;
      goto done;
    }
  }

  /* Set DISCONT flag here ! */

  if (priv->discont) {
    GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* update rate estimate */
  GST_OBJECT_LOCK (decoder);
  priv->bytes_out += gst_buffer_get_size (buf);
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    priv->time += duration;
  } else {
    /* FIXME : Use difference between current and previous outgoing
     * timestamp, and relate to difference between current and previous
     * bytes */
    /* better none than nothing valid */
    priv->time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
      "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
      gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* we got data, so note things are looking up again, reduce
   * the error count, if there is one */
  if (G_UNLIKELY (priv->error_count))
    priv->error_count = 0;

#ifndef GST_DISABLE_DEBUG
  if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
    GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;

    /* First buffer since reset, report how long we took */
    GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
        " to produce", GST_TIME_ARGS (elapsed));
    priv->last_reset_time = GST_CLOCK_TIME_NONE;
  }
#endif

  /* release STREAM_LOCK not to block upstream
   * while pushing buffer downstream */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  ret = gst_pad_push (decoder->srcpad, buf);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

done:
  return ret;
}
3197
3198 /**
3199 * gst_video_decoder_add_to_frame:
3200 * @decoder: a #GstVideoDecoder
3201 * @n_bytes: the number of bytes to add
3202 *
3203 * Removes next @n_bytes of input data and adds it to currently parsed frame.
3204 */
3205 void
gst_video_decoder_add_to_frame(GstVideoDecoder * decoder,int n_bytes)3206 gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
3207 {
3208 GstVideoDecoderPrivate *priv = decoder->priv;
3209 GstBuffer *buf;
3210
3211 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3212
3213 if (n_bytes == 0)
3214 return;
3215
3216 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3217 if (gst_adapter_available (priv->output_adapter) == 0) {
3218 priv->frame_offset =
3219 priv->input_offset - gst_adapter_available (priv->input_adapter);
3220 }
3221 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3222
3223 gst_adapter_push (priv->output_adapter, buf);
3224 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3225 }
3226
3227 /**
3228 * gst_video_decoder_get_pending_frame_size:
3229 * @decoder: a #GstVideoDecoder
3230 *
3231 * Returns the number of bytes previously added to the current frame
3232 * by calling gst_video_decoder_add_to_frame().
3233 *
3234 * Returns: The number of bytes pending for the current frame
3235 *
3236 * Since: 1.4
3237 */
3238 gsize
gst_video_decoder_get_pending_frame_size(GstVideoDecoder * decoder)3239 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3240 {
3241 GstVideoDecoderPrivate *priv = decoder->priv;
3242 gsize ret;
3243
3244 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3245 ret = gst_adapter_available (priv->output_adapter);
3246 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3247
3248 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
3249 ret);
3250
3251 return ret;
3252 }
3253
3254 static guint64
gst_video_decoder_get_frame_duration(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)3255 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
3256 GstVideoCodecFrame * frame)
3257 {
3258 GstVideoCodecState *state = decoder->priv->output_state;
3259
3260 /* it's possible that we don't have a state yet when we are dropping the
3261 * initial buffers */
3262 if (state == NULL)
3263 return GST_CLOCK_TIME_NONE;
3264
3265 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3266 return GST_CLOCK_TIME_NONE;
3267 }
3268
3269 /* FIXME: For interlaced frames this needs to take into account
3270 * the number of valid fields in the frame
3271 */
3272
3273 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3274 state->info.fps_n);
3275 }
3276
/**
 * gst_video_decoder_have_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Gathers all data collected for currently parsed frame, gathers corresponding
 * metadata and passes it along for further processing, i.e. @handle_frame.
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_video_decoder_have_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buffer;
  int n_available;
  GstClockTime pts, dts, duration;
  guint flags;
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
      priv->frame_offset);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* Collect everything gathered via gst_video_decoder_add_to_frame();
   * an empty buffer is used when nothing was gathered */
  n_available = gst_adapter_available (priv->output_adapter);
  if (n_available) {
    buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  } else {
    buffer = gst_buffer_new_and_alloc (0);
  }

  priv->current_frame->input_buffer = buffer;

  /* timestamps/flags are taken from the input buffer that started at
   * frame_offset in the input stream */
  gst_video_decoder_get_buffer_info_at_offset (decoder,
      priv->frame_offset, &pts, &dts, &duration, &flags);

  GST_BUFFER_PTS (buffer) = pts;
  GST_BUFFER_DTS (buffer) = dts;
  GST_BUFFER_DURATION (buffer) = duration;
  GST_BUFFER_FLAGS (buffer) = flags;

  GST_LOG_OBJECT (decoder, "collected frame size %d, "
      "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
      GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
      GST_TIME_ARGS (duration));

  /* absence of DELTA_UNIT means this is a keyframe */
  if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
    GST_LOG_OBJECT (decoder, "Marking as sync point");
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
  }

  /* In reverse playback, just capture and queue frames for later processing */
  if (decoder->input_segment.rate < 0.0) {
    priv->parse_gather =
        g_list_prepend (priv->parse_gather, priv->current_frame);
  } else {
    /* Otherwise, decode the frame, which gives away our ref */
    ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
  }
  /* Current frame is gone now, either way */
  priv->current_frame = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
3343
3344 /* Pass the frame in priv->current_frame through the
3345 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
3346 * or dropping by passing to gvd_drop_frame() */
3347 static GstFlowReturn
gst_video_decoder_decode_frame(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)3348 gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
3349 GstVideoCodecFrame * frame)
3350 {
3351 GstVideoDecoderPrivate *priv = decoder->priv;
3352 GstVideoDecoderClass *decoder_class;
3353 GstFlowReturn ret = GST_FLOW_OK;
3354
3355 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3356
3357 /* FIXME : This should only have to be checked once (either the subclass has an
3358 * implementation, or it doesn't) */
3359 g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
3360
3361 frame->distance_from_sync = priv->distance_from_sync;
3362 priv->distance_from_sync++;
3363 frame->pts = GST_BUFFER_PTS (frame->input_buffer);
3364 frame->dts = GST_BUFFER_DTS (frame->input_buffer);
3365 frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
3366
3367 /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
3368 * durations. */
3369 /* FIXME upstream can be quite wrong about the keyframe aspect,
3370 * so we could be going off here as well,
3371 * maybe let subclass decide if it really is/was a keyframe */
3372 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame) &&
3373 GST_CLOCK_TIME_IS_VALID (frame->pts)
3374 && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
3375 /* just in case they are not equal as might ideally be,
3376 * e.g. quicktime has a (positive) delta approach */
3377 priv->pts_delta = frame->pts - frame->dts;
3378 GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
3379 (gint) (priv->pts_delta / GST_MSECOND));
3380 }
3381
3382 frame->abidata.ABI.ts = frame->dts;
3383 frame->abidata.ABI.ts2 = frame->pts;
3384
3385 GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
3386 ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
3387 frame->distance_from_sync);
3388
3389 gst_video_codec_frame_ref (frame);
3390 priv->frames = g_list_append (priv->frames, frame);
3391
3392 if (g_list_length (priv->frames) > 10) {
3393 GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
3394 "possible internal leaking?", g_list_length (priv->frames));
3395 }
3396
3397 frame->deadline =
3398 gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
3399 frame->pts);
3400
3401 /* do something with frame */
3402 ret = decoder_class->handle_frame (decoder, frame);
3403 if (ret != GST_FLOW_OK)
3404 GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
3405
3406 /* the frame has either been added to parse_gather or sent to
3407 handle frame so there is no need to unref it */
3408 return ret;
3409 }
3410
3411
3412 /**
3413 * gst_video_decoder_get_output_state:
3414 * @decoder: a #GstVideoDecoder
3415 *
3416 * Get the #GstVideoCodecState currently describing the output stream.
3417 *
3418 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
3419 */
3420 GstVideoCodecState *
gst_video_decoder_get_output_state(GstVideoDecoder * decoder)3421 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
3422 {
3423 GstVideoCodecState *state = NULL;
3424
3425 GST_OBJECT_LOCK (decoder);
3426 if (decoder->priv->output_state)
3427 state = gst_video_codec_state_ref (decoder->priv->output_state);
3428 GST_OBJECT_UNLOCK (decoder);
3429
3430 return state;
3431 }
3432
/**
 * gst_video_decoder_set_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
 * as the output state for the decoder.
 * Any previously set output state on @decoder will be replaced by the newly
 * created one.
 *
 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
 * @reference.
 *
 * If the subclass wishes to override some fields from the output state (like
 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
 *
 * The new output state will only take effect (set on pads and buffers) starting
 * from the next call to #gst_video_decoder_finish_frame().
 *
 * Returns: (transfer full): the newly configured output state.
 */
GstVideoCodecState *
gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, guint width, guint height,
    GstVideoCodecState * reference)
{
  /* simple delegation: progressive is the default interlace mode */
  return gst_video_decoder_set_interlaced_output_state (decoder, fmt,
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference);
}
3466
/**
 * gst_video_decoder_set_interlaced_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @mode: A #GstVideoInterlaceMode
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Same as #gst_video_decoder_set_output_state() but also allows you to also set
 * the interlacing mode.
 *
 * Returns: (transfer full): the newly configured output state.
 *
 * Since: 1.16.
 */
GstVideoCodecState *
gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width, guint height,
    GstVideoCodecState * reference)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecState *state;

  GST_DEBUG_OBJECT (decoder, "fmt:%d, width:%d, height:%d, reference:%p",
      fmt, width, height, reference);

  /* Create the new output state */
  state = _new_output_state (fmt, mode, width, height, reference);
  if (!state)
    return NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* object lock nested inside the stream lock: the output_state pointer is
   * also read from other threads (e.g. query handlers) */
  GST_OBJECT_LOCK (decoder);
  /* Replace existing output state by new one */
  if (priv->output_state)
    gst_video_codec_state_unref (priv->output_state);
  priv->output_state = gst_video_codec_state_ref (state);

  /* cache the per-frame duration used by QoS computations */
  if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
    priv->qos_frame_duration =
        gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
        priv->output_state->info.fps_n);
  } else {
    priv->qos_frame_duration = 0;
  }
  /* the new state is applied on the next finish_frame() */
  priv->output_state_changed = TRUE;
  GST_OBJECT_UNLOCK (decoder);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return state;
}
3521
3522
3523 /**
3524 * gst_video_decoder_get_oldest_frame:
3525 * @decoder: a #GstVideoDecoder
3526 *
3527 * Get the oldest pending unfinished #GstVideoCodecFrame
3528 *
3529 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
3530 */
3531 GstVideoCodecFrame *
gst_video_decoder_get_oldest_frame(GstVideoDecoder * decoder)3532 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
3533 {
3534 GstVideoCodecFrame *frame = NULL;
3535
3536 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3537 if (decoder->priv->frames)
3538 frame = gst_video_codec_frame_ref (decoder->priv->frames->data);
3539 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3540
3541 return (GstVideoCodecFrame *) frame;
3542 }
3543
3544 /**
3545 * gst_video_decoder_get_frame:
3546 * @decoder: a #GstVideoDecoder
3547 * @frame_number: system_frame_number of a frame
3548 *
3549 * Get a pending unfinished #GstVideoCodecFrame
3550 *
3551 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
3552 */
3553 GstVideoCodecFrame *
gst_video_decoder_get_frame(GstVideoDecoder * decoder,int frame_number)3554 gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
3555 {
3556 GList *g;
3557 GstVideoCodecFrame *frame = NULL;
3558
3559 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
3560
3561 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3562 for (g = decoder->priv->frames; g; g = g->next) {
3563 GstVideoCodecFrame *tmp = g->data;
3564
3565 if (tmp->system_frame_number == frame_number) {
3566 frame = gst_video_codec_frame_ref (tmp);
3567 break;
3568 }
3569 }
3570 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3571
3572 return frame;
3573 }
3574
3575 /**
3576 * gst_video_decoder_get_frames:
3577 * @decoder: a #GstVideoDecoder
3578 *
3579 * Get all pending unfinished #GstVideoCodecFrame
3580 *
3581 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
3582 */
3583 GList *
gst_video_decoder_get_frames(GstVideoDecoder * decoder)3584 gst_video_decoder_get_frames (GstVideoDecoder * decoder)
3585 {
3586 GList *frames;
3587
3588 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3589 frames = g_list_copy (decoder->priv->frames);
3590 g_list_foreach (frames, (GFunc) gst_video_codec_frame_ref, NULL);
3591 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3592
3593 return frames;
3594 }
3595
3596 static gboolean
gst_video_decoder_decide_allocation_default(GstVideoDecoder * decoder,GstQuery * query)3597 gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
3598 GstQuery * query)
3599 {
3600 GstCaps *outcaps = NULL;
3601 GstBufferPool *pool = NULL;
3602 guint size, min, max;
3603 GstAllocator *allocator = NULL;
3604 GstAllocationParams params;
3605 GstStructure *config;
3606 gboolean update_pool, update_allocator;
3607 GstVideoInfo vinfo;
3608
3609 gst_query_parse_allocation (query, &outcaps, NULL);
3610 gst_video_info_init (&vinfo);
3611 if (outcaps)
3612 gst_video_info_from_caps (&vinfo, outcaps);
3613
3614 /* we got configuration from our peer or the decide_allocation method,
3615 * parse them */
3616 if (gst_query_get_n_allocation_params (query) > 0) {
3617 /* try the allocator */
3618 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
3619 update_allocator = TRUE;
3620 } else {
3621 allocator = NULL;
3622 gst_allocation_params_init (¶ms);
3623 update_allocator = FALSE;
3624 }
3625
3626 if (gst_query_get_n_allocation_pools (query) > 0) {
3627 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
3628 size = MAX (size, vinfo.size);
3629 update_pool = TRUE;
3630 } else {
3631 pool = NULL;
3632 size = vinfo.size;
3633 min = max = 0;
3634
3635 update_pool = FALSE;
3636 }
3637
3638 if (pool == NULL) {
3639 /* no pool, we can make our own */
3640 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
3641 pool = gst_video_buffer_pool_new ();
3642 }
3643
3644 /* now configure */
3645 config = gst_buffer_pool_get_config (pool);
3646 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3647 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
3648
3649 GST_DEBUG_OBJECT (decoder,
3650 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
3651 pool);
3652 if (!gst_buffer_pool_set_config (pool, config)) {
3653 config = gst_buffer_pool_get_config (pool);
3654
3655 /* If change are not acceptable, fallback to generic pool */
3656 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
3657 max)) {
3658 GST_DEBUG_OBJECT (decoder, "unsuported pool, making new pool");
3659
3660 gst_object_unref (pool);
3661 pool = gst_video_buffer_pool_new ();
3662 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3663 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
3664 }
3665
3666 if (!gst_buffer_pool_set_config (pool, config))
3667 goto config_failed;
3668 }
3669
3670 if (update_allocator)
3671 gst_query_set_nth_allocation_param (query, 0, allocator, ¶ms);
3672 else
3673 gst_query_add_allocation_param (query, allocator, ¶ms);
3674 if (allocator)
3675 gst_object_unref (allocator);
3676
3677 if (update_pool)
3678 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
3679 else
3680 gst_query_add_allocation_pool (query, pool, size, min, max);
3681
3682 if (pool)
3683 gst_object_unref (pool);
3684
3685 return TRUE;
3686
3687 config_failed:
3688 if (allocator)
3689 gst_object_unref (allocator);
3690 if (pool)
3691 gst_object_unref (pool);
3692 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
3693 ("Failed to configure the buffer pool"),
3694 ("Configuration is most likely invalid, please report this issue."));
3695 return FALSE;
3696 }
3697
/* Default propose_allocation: accept the upstream ALLOCATION query as-is,
 * without adding any pools, allocators or metadata proposals */
static gboolean
gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  return TRUE;
}
3704
3705 static gboolean
gst_video_decoder_negotiate_pool(GstVideoDecoder * decoder,GstCaps * caps)3706 gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
3707 {
3708 GstVideoDecoderClass *klass;
3709 GstQuery *query = NULL;
3710 GstBufferPool *pool = NULL;
3711 GstAllocator *allocator;
3712 GstAllocationParams params;
3713 gboolean ret = TRUE;
3714
3715 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3716
3717 query = gst_query_new_allocation (caps, TRUE);
3718
3719 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
3720
3721 if (!gst_pad_peer_query (decoder->srcpad, query)) {
3722 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
3723 }
3724
3725 g_assert (klass->decide_allocation != NULL);
3726 ret = klass->decide_allocation (decoder, query);
3727
3728 GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
3729 query);
3730
3731 if (!ret)
3732 goto no_decide_allocation;
3733
3734 /* we got configuration from our peer or the decide_allocation method,
3735 * parse them */
3736 if (gst_query_get_n_allocation_params (query) > 0) {
3737 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
3738 } else {
3739 allocator = NULL;
3740 gst_allocation_params_init (¶ms);
3741 }
3742
3743 if (gst_query_get_n_allocation_pools (query) > 0)
3744 gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
3745 if (!pool) {
3746 if (allocator)
3747 gst_object_unref (allocator);
3748 ret = FALSE;
3749 goto no_decide_allocation;
3750 }
3751
3752 if (decoder->priv->allocator)
3753 gst_object_unref (decoder->priv->allocator);
3754 decoder->priv->allocator = allocator;
3755 decoder->priv->params = params;
3756
3757 if (decoder->priv->pool) {
3758 /* do not set the bufferpool to inactive here, it will be done
3759 * on its finalize function. As videodecoder do late renegotiation
3760 * it might happen that some element downstream is already using this
3761 * same bufferpool and deactivating it will make it fail.
3762 * Happens when a downstream element changes from passthrough to
3763 * non-passthrough and gets this same bufferpool to use */
3764 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
3765 decoder->priv->pool);
3766 gst_object_unref (decoder->priv->pool);
3767 }
3768 decoder->priv->pool = pool;
3769
3770 /* and activate */
3771 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
3772 gst_buffer_pool_set_active (pool, TRUE);
3773
3774 done:
3775 if (query)
3776 gst_query_unref (query);
3777
3778 return ret;
3779
3780 /* Errors */
3781 no_decide_allocation:
3782 {
3783 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
3784 goto done;
3785 }
3786 }
3787
/* Default negotiate implementation: publish the configured output state
 * as src caps (pushing pending pre-CAPS events first) and negotiate the
 * output buffer pool. */
static gboolean
gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = decoder->priv->output_state;
  gboolean ret = TRUE;
  GstVideoCodecFrame *frame;
  GstCaps *prevcaps;

  /* No output state yet: only negotiate the pool, with NULL caps */
  if (!state) {
    GST_DEBUG_OBJECT (decoder,
        "Trying to negotiate the pool with out setting the o/p format");
    ret = gst_video_decoder_negotiate_pool (decoder, NULL);
    goto done;
  }

  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
  g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);

  /* If the base class didn't set any multiview params, assume mono
   * now */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
      GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
        GST_VIDEO_MULTIVIEW_MODE_MONO;
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
        GST_VIDEO_MULTIVIEW_FLAGS_NONE;
  }

  GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  /* Derive caps from the video info when the subclass didn't set them,
   * and default allocation caps to the output caps */
  if (state->caps == NULL)
    state->caps = gst_video_info_to_caps (&state->info);
  if (state->allocation_caps == NULL)
    state->allocation_caps = gst_caps_ref (state->caps);

  GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);

  /* Push all pending pre-caps events of the oldest frame before
   * setting caps */
  frame = decoder->priv->frames ? decoder->priv->frames->data : NULL;
  if (frame || decoder->priv->current_frame_events) {
    GList **events, *l;

    if (frame) {
      events = &frame->events;
    } else {
      events = &decoder->priv->current_frame_events;
    }

    /* The event list is stored newest-first; walk from the tail so events
     * that must precede CAPS are pushed in their original order and
     * unlinked from the list */
    for (l = g_list_last (*events); l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        gst_video_decoder_push_event (decoder, event);
        tmp = l;
        l = l->prev;
        *events = g_list_delete_link (*events, tmp);
      } else {
        l = l->prev;
      }
    }
  }

  /* Only (re)set src caps when they differ from what is currently on the
   * pad, to avoid needless renegotiation downstream */
  prevcaps = gst_pad_get_current_caps (decoder->srcpad);
  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
    if (!prevcaps) {
      GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
    }
    ret = gst_pad_set_caps (decoder->srcpad, state->caps);
  } else {
    ret = TRUE;
    GST_DEBUG_OBJECT (decoder,
        "current src pad and output state caps are the same");
  }
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!ret)
    goto done;
  decoder->priv->output_state_changed = FALSE;
  /* Negotiate pool */
  ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);

done:
  return ret;
}
3877
3878 static gboolean
gst_video_decoder_negotiate_unlocked(GstVideoDecoder * decoder)3879 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
3880 {
3881 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3882 gboolean ret = TRUE;
3883
3884 if (G_LIKELY (klass->negotiate))
3885 ret = klass->negotiate (decoder);
3886
3887 return ret;
3888 }
3889
3890 /**
3891 * gst_video_decoder_negotiate:
3892 * @decoder: a #GstVideoDecoder
3893 *
3894 * Negotiate with downstream elements to currently configured #GstVideoCodecState.
3895 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
3896 * negotiate fails.
3897 *
3898 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
3899 */
3900 gboolean
gst_video_decoder_negotiate(GstVideoDecoder * decoder)3901 gst_video_decoder_negotiate (GstVideoDecoder * decoder)
3902 {
3903 GstVideoDecoderClass *klass;
3904 gboolean ret = TRUE;
3905
3906 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);
3907
3908 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3909
3910 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3911 gst_pad_check_reconfigure (decoder->srcpad);
3912 if (klass->negotiate) {
3913 ret = klass->negotiate (decoder);
3914 if (!ret)
3915 gst_pad_mark_reconfigure (decoder->srcpad);
3916 }
3917 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3918
3919 return ret;
3920 }
3921
3922 /**
3923 * gst_video_decoder_allocate_output_buffer:
3924 * @decoder: a #GstVideoDecoder
3925 *
3926 * Helper function that allocates a buffer to hold a video frame for @decoder's
3927 * current #GstVideoCodecState.
3928 *
3929 * You should use gst_video_decoder_allocate_output_frame() instead of this
3930 * function, if possible at all.
3931 *
3932 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
3933 * allocated (e.g. when downstream is flushing or shutting down)
3934 */
3935 GstBuffer *
gst_video_decoder_allocate_output_buffer(GstVideoDecoder * decoder)3936 gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
3937 {
3938 GstFlowReturn flow;
3939 GstBuffer *buffer = NULL;
3940 gboolean needs_reconfigure = FALSE;
3941
3942 GST_DEBUG ("alloc src buffer");
3943
3944 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3945 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3946 if (G_UNLIKELY (!decoder->priv->output_state
3947 || decoder->priv->output_state_changed || needs_reconfigure)) {
3948 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3949 if (decoder->priv->output_state) {
3950 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
3951 gst_pad_mark_reconfigure (decoder->srcpad);
3952 goto fallback;
3953 } else {
3954 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
3955 goto failed_allocation;
3956 }
3957 }
3958 }
3959
3960 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
3961
3962 if (flow != GST_FLOW_OK) {
3963 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
3964 gst_flow_get_name (flow));
3965 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
3966 goto fallback;
3967 else
3968 goto failed_allocation;
3969 }
3970 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3971
3972 return buffer;
3973
3974 fallback:
3975 GST_INFO_OBJECT (decoder,
3976 "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
3977 buffer =
3978 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
3979 NULL);
3980
3981 failed_allocation:
3982 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
3983 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3984
3985 return buffer;
3986 }
3987
3988 /**
3989 * gst_video_decoder_allocate_output_frame:
3990 * @decoder: a #GstVideoDecoder
3991 * @frame: a #GstVideoCodecFrame
3992 *
3993 * Helper function that allocates a buffer to hold a video frame for @decoder's
3994 * current #GstVideoCodecState. Subclass should already have configured video
3995 * state and set src pad caps.
3996 *
3997 * The buffer allocated here is owned by the frame and you should only
3998 * keep references to the frame, not the buffer.
3999 *
4000 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4001 */
4002 GstFlowReturn
gst_video_decoder_allocate_output_frame(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)4003 gst_video_decoder_allocate_output_frame (GstVideoDecoder *
4004 decoder, GstVideoCodecFrame * frame)
4005 {
4006 return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
4007 NULL);
4008 }
4009
/**
 * gst_video_decoder_allocate_output_frame_with_params:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @params: a #GstBufferPoolAcquireParams
 *
 * Same as #gst_video_decoder_allocate_output_frame except it allows passing
 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 *
 * Since: 1.12
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn flow_ret;
  GstVideoCodecState *state;
  /* NOTE(review): int while GST_VIDEO_INFO_SIZE yields gsize; assumed safe
   * for plausible frame sizes here — confirm for very large frames */
  int num_bytes;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  state = decoder->priv->output_state;
  if (state == NULL) {
    g_warning ("Output state should be set before allocating frame");
    goto error;
  }
  num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
  if (num_bytes == 0) {
    g_warning ("Frame size should not be 0");
    goto error;
  }

  /* Renegotiate first if the output state changed or the src pad was
   * flagged for reconfiguration; on failure still attempt the pool
   * acquire below, but re-flag the pad so negotiation is retried */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
      gst_pad_mark_reconfigure (decoder->srcpad);
    }
  }

  GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);

  /* The acquired buffer is stored on the frame; the frame owns it */
  flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
      &frame->output_buffer, params);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return flow_ret;

error:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_ERROR;
}
4069
4070 /**
4071 * gst_video_decoder_get_max_decode_time:
4072 * @decoder: a #GstVideoDecoder
4073 * @frame: a #GstVideoCodecFrame
4074 *
4075 * Determines maximum possible decoding time for @frame that will
4076 * allow it to decode and arrive in time (as determined by QoS events).
4077 * In particular, a negative result means decoding in time is no longer possible
4078 * and should therefore occur as soon/skippy as possible.
4079 *
4080 * Returns: max decoding time.
4081 */
4082 GstClockTimeDiff
gst_video_decoder_get_max_decode_time(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)4083 gst_video_decoder_get_max_decode_time (GstVideoDecoder *
4084 decoder, GstVideoCodecFrame * frame)
4085 {
4086 GstClockTimeDiff deadline;
4087 GstClockTime earliest_time;
4088
4089 GST_OBJECT_LOCK (decoder);
4090 earliest_time = decoder->priv->earliest_time;
4091 if (GST_CLOCK_TIME_IS_VALID (earliest_time)
4092 && GST_CLOCK_TIME_IS_VALID (frame->deadline))
4093 deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
4094 else
4095 deadline = G_MAXINT64;
4096
4097 GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
4098 ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
4099 GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
4100 GST_STIME_ARGS (deadline));
4101
4102 GST_OBJECT_UNLOCK (decoder);
4103
4104 return deadline;
4105 }
4106
4107 /**
4108 * gst_video_decoder_get_qos_proportion:
4109 * @decoder: a #GstVideoDecoder
4110 * current QoS proportion, or %NULL
4111 *
4112 * Returns: The current QoS proportion.
4113 *
4114 * Since: 1.0.3
4115 */
4116 gdouble
gst_video_decoder_get_qos_proportion(GstVideoDecoder * decoder)4117 gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
4118 {
4119 gdouble proportion;
4120
4121 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);
4122
4123 GST_OBJECT_LOCK (decoder);
4124 proportion = decoder->priv->proportion;
4125 GST_OBJECT_UNLOCK (decoder);
4126
4127 return proportion;
4128 }
4129
/* Internal helper behind GST_VIDEO_DECODER_ERROR(): accounts a decoding
 * error of the given @weight and decides whether it is fatal.
 *
 * Takes ownership of @txt and @dbg (allocated strings): on the fatal path
 * they are handed to gst_element_message_full(), otherwise freed here.
 * Returns GST_FLOW_ERROR once the accumulated error count exceeds
 * max_errors (a negative max_errors means never fatal), GST_FLOW_OK
 * otherwise. */
GstFlowReturn
_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
{
  if (txt)
    GST_WARNING_OBJECT (dec, "error: %s", txt);
  if (dbg)
    GST_WARNING_OBJECT (dec, "error: %s", dbg);
  dec->priv->error_count += weight;
  /* an error implies a discontinuity in the output stream */
  dec->priv->discont = TRUE;
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    /* gst_element_message_full() assumes ownership of txt/dbg */
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
  } else {
    g_free (txt);
    g_free (dbg);
    return GST_FLOW_OK;
  }
}
4152
4153 /**
4154 * gst_video_decoder_set_max_errors:
4155 * @dec: a #GstVideoDecoder
4156 * @num: max tolerated errors
4157 *
4158 * Sets numbers of tolerated decoder errors, where a tolerated one is then only
4159 * warned about, but more than tolerated will lead to fatal error. You can set
4160 * -1 for never returning fatal errors. Default is set to
4161 * GST_VIDEO_DECODER_MAX_ERRORS.
4162 *
4163 * The '-1' option was added in 1.4
4164 */
4165 void
gst_video_decoder_set_max_errors(GstVideoDecoder * dec,gint num)4166 gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
4167 {
4168 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4169
4170 dec->priv->max_errors = num;
4171 }
4172
4173 /**
4174 * gst_video_decoder_get_max_errors:
4175 * @dec: a #GstVideoDecoder
4176 *
4177 * Returns: currently configured decoder tolerated error count.
4178 */
4179 gint
gst_video_decoder_get_max_errors(GstVideoDecoder * dec)4180 gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
4181 {
4182 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4183
4184 return dec->priv->max_errors;
4185 }
4186
4187 /**
4188 * gst_video_decoder_set_needs_format:
4189 * @dec: a #GstVideoDecoder
4190 * @enabled: new state
4191 *
4192 * Configures decoder format needs. If enabled, subclass needs to be
4193 * negotiated with format caps before it can process any data. It will then
4194 * never be handed any data before it has been configured.
4195 * Otherwise, it might be handed data without having been configured and
4196 * is then expected being able to do so either by default
4197 * or based on the input data.
4198 *
4199 * Since: 1.4
4200 */
4201 void
gst_video_decoder_set_needs_format(GstVideoDecoder * dec,gboolean enabled)4202 gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
4203 {
4204 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4205
4206 dec->priv->needs_format = enabled;
4207 }
4208
4209 /**
4210 * gst_video_decoder_get_needs_format:
4211 * @dec: a #GstVideoDecoder
4212 *
4213 * Queries decoder required format handling.
4214 *
4215 * Returns: %TRUE if required format handling is enabled.
4216 *
4217 * Since: 1.4
4218 */
4219 gboolean
gst_video_decoder_get_needs_format(GstVideoDecoder * dec)4220 gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
4221 {
4222 gboolean result;
4223
4224 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4225
4226 result = dec->priv->needs_format;
4227
4228 return result;
4229 }
4230
4231 /**
4232 * gst_video_decoder_set_packetized:
4233 * @decoder: a #GstVideoDecoder
4234 * @packetized: whether the input data should be considered as packetized.
4235 *
4236 * Allows baseclass to consider input data as packetized or not. If the
4237 * input is packetized, then the @parse method will not be called.
4238 */
4239 void
gst_video_decoder_set_packetized(GstVideoDecoder * decoder,gboolean packetized)4240 gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
4241 gboolean packetized)
4242 {
4243 decoder->priv->packetized = packetized;
4244 }
4245
4246 /**
4247 * gst_video_decoder_get_packetized:
4248 * @decoder: a #GstVideoDecoder
4249 *
4250 * Queries whether input data is considered packetized or not by the
4251 * base class.
4252 *
4253 * Returns: TRUE if input data is considered packetized.
4254 */
4255 gboolean
gst_video_decoder_get_packetized(GstVideoDecoder * decoder)4256 gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
4257 {
4258 return decoder->priv->packetized;
4259 }
4260
4261 /**
4262 * gst_video_decoder_set_estimate_rate:
4263 * @dec: a #GstVideoDecoder
4264 * @enabled: whether to enable byte to time conversion
4265 *
4266 * Allows baseclass to perform byte to time estimated conversion.
4267 */
4268 void
gst_video_decoder_set_estimate_rate(GstVideoDecoder * dec,gboolean enabled)4269 gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
4270 {
4271 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4272
4273 dec->priv->do_estimate_rate = enabled;
4274 }
4275
4276 /**
4277 * gst_video_decoder_get_estimate_rate:
4278 * @dec: a #GstVideoDecoder
4279 *
4280 * Returns: currently configured byte to time conversion setting
4281 */
4282 gboolean
gst_video_decoder_get_estimate_rate(GstVideoDecoder * dec)4283 gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
4284 {
4285 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4286
4287 return dec->priv->do_estimate_rate;
4288 }
4289
4290 /**
4291 * gst_video_decoder_set_latency:
4292 * @decoder: a #GstVideoDecoder
4293 * @min_latency: minimum latency
4294 * @max_latency: maximum latency
4295 *
4296 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
4297 * latency is. Will also post a LATENCY message on the bus so the pipeline
4298 * can reconfigure its global latency.
4299 */
4300 void
gst_video_decoder_set_latency(GstVideoDecoder * decoder,GstClockTime min_latency,GstClockTime max_latency)4301 gst_video_decoder_set_latency (GstVideoDecoder * decoder,
4302 GstClockTime min_latency, GstClockTime max_latency)
4303 {
4304 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
4305 g_return_if_fail (max_latency >= min_latency);
4306
4307 GST_OBJECT_LOCK (decoder);
4308 decoder->priv->min_latency = min_latency;
4309 decoder->priv->max_latency = max_latency;
4310 GST_OBJECT_UNLOCK (decoder);
4311
4312 gst_element_post_message (GST_ELEMENT_CAST (decoder),
4313 gst_message_new_latency (GST_OBJECT_CAST (decoder)));
4314 }
4315
4316 /**
4317 * gst_video_decoder_get_latency:
4318 * @decoder: a #GstVideoDecoder
4319 * @min_latency: (out) (allow-none): address of variable in which to store the
4320 * configured minimum latency, or %NULL
4321 * @max_latency: (out) (allow-none): address of variable in which to store the
4322 * configured mximum latency, or %NULL
4323 *
4324 * Query the configured decoder latency. Results will be returned via
4325 * @min_latency and @max_latency.
4326 */
4327 void
gst_video_decoder_get_latency(GstVideoDecoder * decoder,GstClockTime * min_latency,GstClockTime * max_latency)4328 gst_video_decoder_get_latency (GstVideoDecoder * decoder,
4329 GstClockTime * min_latency, GstClockTime * max_latency)
4330 {
4331 GST_OBJECT_LOCK (decoder);
4332 if (min_latency)
4333 *min_latency = decoder->priv->min_latency;
4334 if (max_latency)
4335 *max_latency = decoder->priv->max_latency;
4336 GST_OBJECT_UNLOCK (decoder);
4337 }
4338
/**
 * gst_video_decoder_merge_tags:
 * @decoder: a #GstVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
 *
 * MT safe.
 */
void
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    /* Drop any previously stored tag list and reset the merge mode */
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    /* Keep a reference to the new tags (NULL simply unsets) */
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    /* Flag so the tags get pushed downstream on the next opportunity */
    decoder->priv->tags_changed = TRUE;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
4380
4381 /**
4382 * gst_video_decoder_get_buffer_pool:
4383 * @decoder: a #GstVideoDecoder
4384 *
4385 * Returns: (transfer full): the instance of the #GstBufferPool used
4386 * by the decoder; free it after use it
4387 */
4388 GstBufferPool *
gst_video_decoder_get_buffer_pool(GstVideoDecoder * decoder)4389 gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
4390 {
4391 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);
4392
4393 if (decoder->priv->pool)
4394 return gst_object_ref (decoder->priv->pool);
4395
4396 return NULL;
4397 }
4398
4399 /**
4400 * gst_video_decoder_get_allocator:
4401 * @decoder: a #GstVideoDecoder
4402 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
4403 * used
4404 * @params: (out) (allow-none) (transfer full): the
4405 * #GstAllocationParams of @allocator
4406 *
4407 * Lets #GstVideoDecoder sub-classes to know the memory @allocator
4408 * used by the base class and its @params.
4409 *
4410 * Unref the @allocator after use it.
4411 */
4412 void
gst_video_decoder_get_allocator(GstVideoDecoder * decoder,GstAllocator ** allocator,GstAllocationParams * params)4413 gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
4414 GstAllocator ** allocator, GstAllocationParams * params)
4415 {
4416 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
4417
4418 if (allocator)
4419 *allocator = decoder->priv->allocator ?
4420 gst_object_ref (decoder->priv->allocator) : NULL;
4421
4422 if (params)
4423 *params = decoder->priv->params;
4424 }
4425
4426 /**
4427 * gst_video_decoder_set_use_default_pad_acceptcaps:
4428 * @decoder: a #GstVideoDecoder
4429 * @use: if the default pad accept-caps query handling should be used
4430 *
4431 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
4432 * to use the default pad query handler to reply to accept-caps queries.
4433 *
4434 * By setting this to true it is possible to further customize the default
4435 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
4436 * %GST_PAD_SET_ACCEPT_TEMPLATE
4437 *
4438 * Since: 1.6
4439 */
4440 void
gst_video_decoder_set_use_default_pad_acceptcaps(GstVideoDecoder * decoder,gboolean use)4441 gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
4442 gboolean use)
4443 {
4444 decoder->priv->use_default_pad_acceptcaps = use;
4445 }
4446