1 /* GStreamer
2  * Copyright (C) 2009 Igalia S.L.
3  * Author: Iago Toral Quiroga <itoral@igalia.com>
4  * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
5  * Copyright (C) 2011 Nokia Corporation. All rights reserved.
6  *   Contact: Stefan Kost <stefan.kost@nokia.com>
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Library General Public
10  * License as published by the Free Software Foundation; either
11  * version 2 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Library General Public License for more details.
17  *
18  * You should have received a copy of the GNU Library General Public
19  * License along with this library; if not, write to the
20  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
21  * Boston, MA 02110-1301, USA.
22  */
23 
24 /**
25  * SECTION:gstaudiodecoder
26  * @title: GstAudioDecoder
27  * @short_description: Base class for audio decoders
28  * @see_also: #GstBaseTransform
29  *
30  * This base class is for audio decoders turning encoded data into
31  * raw audio samples.
32  *
33  * GstAudioDecoder and its subclass should cooperate as follows.
34  *
35  * ## Configuration
36  *
37  *   * Initially, GstAudioDecoder calls @start when the decoder element
38  *     is activated, which allows the subclass to perform any global setup.
39  *     Base class (context) parameters can already be set according to subclass
40  *     capabilities (or possibly upon receiving more information in a subsequent
41  *     @set_format).
42  *   * GstAudioDecoder calls @set_format to inform the subclass of the format
43  *     of input audio data that it is about to receive.
44  *     While unlikely, it might be called more than once if changing input
45  *     parameters require reconfiguration.
46  *   * GstAudioDecoder calls @stop at end of all processing.
47  *
48  * As of the configuration stage, and throughout processing, GstAudioDecoder
49  * provides various (context) parameters, e.g. describing the format of
50  * output audio data (valid when output caps have been set) or the current parsing state.
51  * Conversely, the subclass can and should configure the context to inform
52  * the base class of its expectations w.r.t. buffer handling.
53  *
54  * ## Data processing
55  *     * Base class gathers input data, and optionally allows the subclass
56  *       to parse this into manageable (as defined by the subclass)
57  *       chunks.  Such chunks are subsequently referred to as 'frames',
58  *       though they may or may not correspond to one (or more) audio format frames.
59  *     * Each input frame is provided to the subclass' @handle_frame.
60  *     * If codec processing results in decoded data, subclass should call
61  *       @gst_audio_decoder_finish_frame to have decoded data pushed
62  *       downstream.
63  *     * Just prior to actually pushing a buffer downstream,
64  *       it is passed to @pre_push.  The subclass should either use this callback
65  *       to arrange for additional downstream pushing or otherwise ensure such
66  *       custom pushing occurs after at least one method call has finished since
67  *       setting the src pad caps.
68  *     * During the parsing process GstAudioDecoderClass will handle both
69  *       srcpad and sinkpad events. Sink events will be passed to the subclass
70  *       if the @sink_event callback has been provided.
71  *
72  * ## Shutdown phase
73  *
74  *   * GstAudioDecoder class calls @stop to inform the subclass that data
75  *     parsing will be stopped.
76  *
77  * The subclass is responsible for providing pad template caps for
78  * source and sink pads. The pads need to be named "sink" and "src". It also
79  * needs to set the fixed caps on the srcpad once the format is known.  This
80  * is typically when the base class calls the subclass' @set_format function, though
81  * it might be delayed until calling @gst_audio_decoder_finish_frame.
82  *
83  * In summary, the above process should have the subclass concentrating on
84  * codec data processing while leaving other matters to the base class,
85  * most notably timestamp handling.  While the subclass may exert more control
86  * in this area (see e.g. @pre_push), doing so is very much not recommended.
87  *
88  * In particular, the base class will try to arrange for perfect output timestamps
89  * as much as possible while tracking upstream timestamps.
90  * To this end, if the deviation between the next ideal expected perfect timestamp
91  * and the upstream timestamp exceeds #GstAudioDecoder:tolerance, a resync to upstream
92  * occurs (which would always happen if the tolerance mechanism is disabled).
93  *
94  * In non-live pipelines, the base class can also (configurably) arrange for
95  * output buffer aggregation, which may help to reduce large(r) numbers of
96  * small(er) buffers being pushed and processed downstream. Note that this
97  * feature is only available if the buffer layout is interleaved. For planar
98  * buffers, the decoder implementation is fully responsible for the output
99  * buffer size.
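 *
 * Both the timestamp tolerance and the aggregation latency are exposed as
 * properties on the decoder instance.  For illustration only (the element
 * name "mydec" is hypothetical), an application could configure them along
 * these lines:
 *
 * |[<!-- language="C" -->
 * // "mydec" stands for any element derived from GstAudioDecoder
 * GstElement *dec = gst_element_factory_make ("mydec", NULL);
 *
 * g_object_set (dec,
 *     "tolerance", 10 * GST_MSECOND,    // tolerate 10 ms of timestamp jitter
 *     "min-latency", 20 * GST_MSECOND,  // aggregate output to >= 20 ms buffers
 *     "plc", TRUE,                      // request packet loss concealment
 *     NULL);
 * ]|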
100  *
101  * On the other hand, it should be noted that baseclass only provides limited
102  * seeking support (upon explicit subclass request), as full-fledged support
103  * should rather be left to upstream demuxer, parser or alike.  This simple
104  * approach caters for seeking and duration reporting using estimated input
105  * bitrates.
106  *
107  * Things that the subclass needs to take care of:
108  *
109  *   * Provide pad templates
110  *   * Set source pad caps when appropriate
111  *   * Set user-configurable properties to sane defaults for the format and
112  *      codec at hand, and convey some subclass capabilities and
113  *      expectations in the context.
114  *
115  *   * Accept data in @handle_frame and provide decoded results to
116  *      @gst_audio_decoder_finish_frame.  If it is prepared to perform
117  *      PLC, it should also accept NULL data in @handle_frame and provide
118  *      data for the indicated duration.  A sketch of this flow follows below.
119  *
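 * To make the above concrete, the following is a minimal, hypothetical sketch
 * of a subclass (all "my_dec" names are invented for illustration and the
 * output format is hard-coded); it decodes one input frame per call and is
 * not a complete implementation:
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
 * {
 *   GstAudioInfo info;
 *
 *   // derive the output format from the input caps (codec specific);
 *   // here S16 stereo at 48 kHz is assumed purely for illustration
 *   gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 48000, 2, NULL);
 *   return gst_audio_decoder_set_output_format (dec, &info);
 * }
 *
 * static GstFlowReturn
 * my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * inbuf)
 * {
 *   GstBuffer *outbuf;
 *
 *   if (inbuf == NULL) {
 *     // NULL input: draining or PLC was requested; a trivial decoder
 *     // simply reports that it produced nothing
 *     return GST_FLOW_OK;
 *   }
 *
 *   // my_dec_decode_buffer() stands for the codec specific decoding step
 *   outbuf = my_dec_decode_buffer (dec, inbuf);
 *   if (outbuf == NULL)
 *     return GST_FLOW_ERROR;
 *
 *   // push the decoded samples for exactly one input frame downstream
 *   return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 * }
 * ]|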
120  */
121 
122 #ifdef HAVE_CONFIG_H
123 #include "config.h"
124 #endif
125 
126 #include "gstaudiodecoder.h"
127 #include "gstaudioutilsprivate.h"
128 #include <gst/pbutils/descriptions.h>
129 
130 #include <string.h>
131 
132 GST_DEBUG_CATEGORY (audiodecoder_debug);
133 #define GST_CAT_DEFAULT audiodecoder_debug
134 
135 enum
136 {
137   LAST_SIGNAL
138 };
139 
140 enum
141 {
142   PROP_0,
143   PROP_LATENCY,
144   PROP_TOLERANCE,
145   PROP_PLC
146 };
147 
148 #define DEFAULT_LATENCY    0
149 #define DEFAULT_TOLERANCE  0
150 #define DEFAULT_PLC        FALSE
151 #define DEFAULT_DRAINABLE  TRUE
152 #define DEFAULT_NEEDS_FORMAT  FALSE
153 
154 typedef struct _GstAudioDecoderContext
155 {
156   /* last negotiated input caps */
157   GstCaps *input_caps;
158 
159   /* (output) audio format */
160   GstAudioInfo info;
161   GstCaps *caps;
162   gboolean output_format_changed;
163 
164   /* parsing state */
165   gboolean eos;
166   gboolean sync;
167 
168   gboolean had_output_data;
169   gboolean had_input_data;
170 
171   /* misc */
172   gint delay;
173 
174   /* output */
175   gboolean do_plc;
176   gboolean do_estimate_rate;
177   gint max_errors;
178   GstCaps *allocation_caps;
179   /* MT-protected (with LOCK) */
180   GstClockTime min_latency;
181   GstClockTime max_latency;
182 
183   GstAllocator *allocator;
184   GstAllocationParams params;
185 } GstAudioDecoderContext;
186 
187 struct _GstAudioDecoderPrivate
188 {
189   /* activation status */
190   gboolean active;
191 
192   /* input base/first ts as basis for output ts */
193   GstClockTime base_ts;
194   /* input samples processed and sent downstream so far (w.r.t. base_ts) */
195   guint64 samples;
196 
197   /* collected input data */
198   GstAdapter *adapter;
199   /* tracking input ts for changes */
200   GstClockTime prev_ts;
201   guint64 prev_distance;
202   /* frames obtained from input */
203   GQueue frames;
204   /* collected output data */
205   GstAdapter *adapter_out;
206   /* ts and duration for output data collected above */
207   GstClockTime out_ts, out_dur;
208   /* mark outgoing discont */
209   gboolean discont;
210 
211   /* subclass gave all it could already */
212   gboolean drained;
213   /* subclass currently being forcibly drained */
214   gboolean force;
215   /* input_segment and output_segment are identical */
216   gboolean in_out_segment_sync;
217   /* expecting the buffer with DISCONT flag */
218   gboolean expecting_discont_buf;
219 
220   /* number of samples pushed out via _finish_subframe(), resets on _finish_frame() */
221   guint subframe_samples;
222 
223   /* input bps estimation */
224   /* global in bytes seen */
225   guint64 bytes_in;
226   /* global samples sent out */
227   guint64 samples_out;
228   /* bytes flushed during parsing */
229   guint sync_flush;
230   /* error count */
231   gint error_count;
232 
233   /* upstream stream tags (global tags are passed through as-is) */
234   GstTagList *upstream_tags;
235 
236   /* subclass tags */
237   GstTagList *taglist;          /* FIXME: rename to decoder_tags */
238   GstTagMergeMode decoder_tags_merge_mode;
239 
240   gboolean taglist_changed;     /* FIXME: rename to tags_changed */
241 
242   /* whether circumstances allow output aggregation */
243   gint agg;
244 
245   /* reverse playback queues */
246   /* collect input */
247   GList *gather;
248   /* to-be-decoded */
249   GList *decode;
250   /* reversed output */
251   GList *queued;
252 
253   /* context storage */
254   GstAudioDecoderContext ctx;
255 
256   /* properties */
257   GstClockTime latency;
258   GstClockTime tolerance;
259   gboolean plc;
260   gboolean drainable;
261   gboolean needs_format;
262 
263   /* pending serialized sink events, will be sent from finish_frame() */
264   GList *pending_events;
265 
266   /* flags */
267   gboolean use_default_pad_acceptcaps;
268 };
269 
270 static void gst_audio_decoder_finalize (GObject * object);
271 static void gst_audio_decoder_set_property (GObject * object,
272     guint prop_id, const GValue * value, GParamSpec * pspec);
273 static void gst_audio_decoder_get_property (GObject * object,
274     guint prop_id, GValue * value, GParamSpec * pspec);
275 
276 static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec);
277 static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder *
278     dec, GstBuffer * buf);
279 
280 static GstStateChangeReturn gst_audio_decoder_change_state (GstElement *
281     element, GstStateChange transition);
282 static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec,
283     GstEvent * event);
284 static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec,
285     GstEvent * event);
286 static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
287     GstEvent * event);
288 static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent,
289     GstEvent * event);
290 static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec,
291     GstCaps * caps);
292 static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent,
293     GstBuffer * buf);
294 static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent,
295     GstQuery * query);
296 static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
297     GstQuery * query);
298 static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full);
299 
300 static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder *
301     dec, GstQuery * query);
302 static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder *
303     dec, GstQuery * query);
304 static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec);
305 static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec);
306 static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec,
307     GstEvent * event);
308 static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec,
309     GstQuery * query);
310 static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec,
311     GstQuery * query);
312 
313 static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder *
314     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
315 
316 static GstFlowReturn
317 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
318     GstBuffer * buf, gint frames);
319 
320 static GstElementClass *parent_class = NULL;
321 static gint private_offset = 0;
322 
323 static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass);
324 static void gst_audio_decoder_init (GstAudioDecoder * dec,
325     GstAudioDecoderClass * klass);
326 
327 GType
328 gst_audio_decoder_get_type (void)
329 {
330   static volatile gsize audio_decoder_type = 0;
331 
332   if (g_once_init_enter (&audio_decoder_type)) {
333     GType _type;
334     static const GTypeInfo audio_decoder_info = {
335       sizeof (GstAudioDecoderClass),
336       NULL,
337       NULL,
338       (GClassInitFunc) gst_audio_decoder_class_init,
339       NULL,
340       NULL,
341       sizeof (GstAudioDecoder),
342       0,
343       (GInstanceInitFunc) gst_audio_decoder_init,
344     };
345 
346     _type = g_type_register_static (GST_TYPE_ELEMENT,
347         "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT);
348 
349     private_offset =
350         g_type_add_instance_private (_type, sizeof (GstAudioDecoderPrivate));
351 
352     g_once_init_leave (&audio_decoder_type, _type);
353   }
354   return audio_decoder_type;
355 }
356 
357 static inline GstAudioDecoderPrivate *
358 gst_audio_decoder_get_instance_private (GstAudioDecoder * self)
359 {
360   return (G_STRUCT_MEMBER_P (self, private_offset));
361 }
362 
363 static void
364 gst_audio_decoder_class_init (GstAudioDecoderClass * klass)
365 {
366   GObjectClass *gobject_class;
367   GstElementClass *element_class;
368   GstAudioDecoderClass *audiodecoder_class;
369 
370   gobject_class = G_OBJECT_CLASS (klass);
371   element_class = GST_ELEMENT_CLASS (klass);
372   audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);
373 
374   parent_class = g_type_class_peek_parent (klass);
375 
376   if (private_offset != 0)
377     g_type_class_adjust_private_offset (klass, &private_offset);
378 
379   GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0,
380       "audio decoder base class");
381 
382   gobject_class->set_property = gst_audio_decoder_set_property;
383   gobject_class->get_property = gst_audio_decoder_get_property;
384   gobject_class->finalize = gst_audio_decoder_finalize;
385 
386   element_class->change_state =
387       GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state);
388 
389   /* Properties */
390   g_object_class_install_property (gobject_class, PROP_LATENCY,
391       g_param_spec_int64 ("min-latency", "Minimum Latency",
392           "Aggregate output data to a minimum of latency time (ns)",
393           0, G_MAXINT64, DEFAULT_LATENCY,
394           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
395 
396   g_object_class_install_property (gobject_class, PROP_TOLERANCE,
397       g_param_spec_int64 ("tolerance", "Tolerance",
398           "Perfect ts while timestamp jitter/imperfection within tolerance (ns)",
399           0, G_MAXINT64, DEFAULT_TOLERANCE,
400           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
401 
402   g_object_class_install_property (gobject_class, PROP_PLC,
403       g_param_spec_boolean ("plc", "Packet Loss Concealment",
404           "Perform packet loss concealment (if supported)",
405           DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
406 
407   audiodecoder_class->sink_event =
408       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc);
409   audiodecoder_class->src_event =
410       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc);
411   audiodecoder_class->propose_allocation =
412       GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default);
413   audiodecoder_class->decide_allocation =
414       GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default);
415   audiodecoder_class->negotiate =
416       GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default);
417   audiodecoder_class->sink_query =
418       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default);
419   audiodecoder_class->src_query =
420       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default);
421   audiodecoder_class->transform_meta =
422       GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default);
423 }
424 
425 static void
426 gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass)
427 {
428   GstPadTemplate *pad_template;
429 
430   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init");
431 
432   dec->priv = gst_audio_decoder_get_instance_private (dec);
433 
434   /* Setup sink pad */
435   pad_template =
436       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
437   g_return_if_fail (pad_template != NULL);
438 
439   dec->sinkpad = gst_pad_new_from_template (pad_template, "sink");
440   gst_pad_set_event_function (dec->sinkpad,
441       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event));
442   gst_pad_set_chain_function (dec->sinkpad,
443       GST_DEBUG_FUNCPTR (gst_audio_decoder_chain));
444   gst_pad_set_query_function (dec->sinkpad,
445       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query));
446   gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
447   GST_DEBUG_OBJECT (dec, "sinkpad created");
448 
449   /* Setup source pad */
450   pad_template =
451       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
452   g_return_if_fail (pad_template != NULL);
453 
454   dec->srcpad = gst_pad_new_from_template (pad_template, "src");
455   gst_pad_set_event_function (dec->srcpad,
456       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event));
457   gst_pad_set_query_function (dec->srcpad,
458       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query));
459   gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad);
460   GST_DEBUG_OBJECT (dec, "srcpad created");
461 
462   dec->priv->adapter = gst_adapter_new ();
463   dec->priv->adapter_out = gst_adapter_new ();
464   g_queue_init (&dec->priv->frames);
465 
466   g_rec_mutex_init (&dec->stream_lock);
467 
468   /* property default */
469   dec->priv->latency = DEFAULT_LATENCY;
470   dec->priv->tolerance = DEFAULT_TOLERANCE;
471   dec->priv->plc = DEFAULT_PLC;
472   dec->priv->drainable = DEFAULT_DRAINABLE;
473   dec->priv->needs_format = DEFAULT_NEEDS_FORMAT;
474 
475   /* init state */
476   dec->priv->ctx.min_latency = 0;
477   dec->priv->ctx.max_latency = 0;
478   gst_audio_decoder_reset (dec, TRUE);
479   GST_DEBUG_OBJECT (dec, "init ok");
480 }
481 
482 static void
483 gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
484 {
485   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");
486 
487   GST_AUDIO_DECODER_STREAM_LOCK (dec);
488 
489   if (full) {
490     dec->priv->active = FALSE;
491     GST_OBJECT_LOCK (dec);
492     dec->priv->bytes_in = 0;
493     dec->priv->samples_out = 0;
494     GST_OBJECT_UNLOCK (dec);
495     dec->priv->agg = -1;
496     dec->priv->error_count = 0;
497     gst_audio_decoder_clear_queues (dec);
498 
499     if (dec->priv->taglist) {
500       gst_tag_list_unref (dec->priv->taglist);
501       dec->priv->taglist = NULL;
502     }
503     dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
504     if (dec->priv->upstream_tags) {
505       gst_tag_list_unref (dec->priv->upstream_tags);
506       dec->priv->upstream_tags = NULL;
507     }
508     dec->priv->taglist_changed = FALSE;
509 
510     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
511     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
512     dec->priv->in_out_segment_sync = TRUE;
513 
514     g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
515     g_list_free (dec->priv->pending_events);
516     dec->priv->pending_events = NULL;
517 
518     if (dec->priv->ctx.allocator)
519       gst_object_unref (dec->priv->ctx.allocator);
520 
521     GST_OBJECT_LOCK (dec);
522     gst_caps_replace (&dec->priv->ctx.input_caps, NULL);
523     gst_caps_replace (&dec->priv->ctx.caps, NULL);
524     gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL);
525 
526     memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));
527 
528     gst_audio_info_init (&dec->priv->ctx.info);
529     GST_OBJECT_UNLOCK (dec);
530     dec->priv->ctx.max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
531     dec->priv->ctx.had_output_data = FALSE;
532     dec->priv->ctx.had_input_data = FALSE;
533   }
534 
535   g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
536   g_queue_clear (&dec->priv->frames);
537   gst_adapter_clear (dec->priv->adapter);
538   gst_adapter_clear (dec->priv->adapter_out);
539   dec->priv->out_ts = GST_CLOCK_TIME_NONE;
540   dec->priv->out_dur = 0;
541   dec->priv->prev_ts = GST_CLOCK_TIME_NONE;
542   dec->priv->prev_distance = 0;
543   dec->priv->drained = TRUE;
544   dec->priv->base_ts = GST_CLOCK_TIME_NONE;
545   dec->priv->samples = 0;
546   dec->priv->discont = TRUE;
547   dec->priv->sync_flush = FALSE;
548 
549   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
550 }
551 
552 static void
553 gst_audio_decoder_finalize (GObject * object)
554 {
555   GstAudioDecoder *dec;
556 
557   g_return_if_fail (GST_IS_AUDIO_DECODER (object));
558   dec = GST_AUDIO_DECODER (object);
559 
560   if (dec->priv->adapter) {
561     g_object_unref (dec->priv->adapter);
562   }
563   if (dec->priv->adapter_out) {
564     g_object_unref (dec->priv->adapter_out);
565   }
566 
567   g_rec_mutex_clear (&dec->stream_lock);
568 
569   G_OBJECT_CLASS (parent_class)->finalize (object);
570 }
571 
572 static GstEvent *
573 gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec)
574 {
575   GstTagList *merged_tags;
576 
577   GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
578   GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->taglist);
579   GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->decoder_tags_merge_mode);
580 
581   merged_tags =
582       gst_tag_list_merge (dec->priv->upstream_tags,
583       dec->priv->taglist, dec->priv->decoder_tags_merge_mode);
584 
585   GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);
586 
587   if (merged_tags == NULL)
588     return NULL;
589 
590   if (gst_tag_list_is_empty (merged_tags)) {
591     gst_tag_list_unref (merged_tags);
592     return NULL;
593   }
594 
595   return gst_event_new_tag (merged_tags);
596 }
597 
598 static gboolean
599 gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event)
600 {
601   switch (GST_EVENT_TYPE (event)) {
602     case GST_EVENT_SEGMENT:{
603       GstSegment seg;
604 
605       GST_AUDIO_DECODER_STREAM_LOCK (dec);
606       gst_event_copy_segment (event, &seg);
607 
608       GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
609 
610       dec->output_segment = seg;
611       dec->priv->in_out_segment_sync =
612           gst_segment_is_equal (&dec->input_segment, &seg);
613       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
614       break;
615     }
616     default:
617       break;
618   }
619 
620   return gst_pad_push_event (dec->srcpad, event);
621 }
622 
623 static gboolean
624 gst_audio_decoder_negotiate_default (GstAudioDecoder * dec)
625 {
626   GstAudioDecoderClass *klass;
627   gboolean res = TRUE;
628   GstCaps *caps;
629   GstCaps *prevcaps;
630   GstQuery *query = NULL;
631   GstAllocator *allocator;
632   GstAllocationParams params;
633 
634   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
635   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE);
636   g_return_val_if_fail (GST_IS_CAPS (dec->priv->ctx.caps), FALSE);
637 
638   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
639 
640   caps = dec->priv->ctx.caps;
641   if (dec->priv->ctx.allocation_caps == NULL)
642     dec->priv->ctx.allocation_caps = gst_caps_ref (caps);
643 
644   GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);
645 
646   if (dec->priv->pending_events) {
647     GList **pending_events, *l;
648 
649     pending_events = &dec->priv->pending_events;
650 
651     GST_DEBUG_OBJECT (dec, "Pushing pending events");
652     for (l = *pending_events; l;) {
653       GstEvent *event = GST_EVENT (l->data);
654       GList *tmp;
655 
656       if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
657         gst_audio_decoder_push_event (dec, l->data);
658         tmp = l;
659         l = l->next;
660         *pending_events = g_list_delete_link (*pending_events, tmp);
661       } else {
662         l = l->next;
663       }
664     }
665   }
666 
667   prevcaps = gst_pad_get_current_caps (dec->srcpad);
668   if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
669     res = gst_pad_set_caps (dec->srcpad, caps);
670   if (prevcaps)
671     gst_caps_unref (prevcaps);
672 
673   if (!res)
674     goto done;
675   dec->priv->ctx.output_format_changed = FALSE;
676 
677   query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE);
678   if (!gst_pad_peer_query (dec->srcpad, query)) {
679     GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
680   }
681 
682   g_assert (klass->decide_allocation != NULL);
683   res = klass->decide_allocation (dec, query);
684 
685   GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
686       query);
687 
688   if (!res)
689     goto no_decide_allocation;
690 
691   /* we got configuration from our peer or the decide_allocation method,
692    * parse them */
693   if (gst_query_get_n_allocation_params (query) > 0) {
694     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
695   } else {
696     allocator = NULL;
697     gst_allocation_params_init (&params);
698   }
699 
700   if (dec->priv->ctx.allocator)
701     gst_object_unref (dec->priv->ctx.allocator);
702   dec->priv->ctx.allocator = allocator;
703   dec->priv->ctx.params = params;
704 
705 done:
706 
707   if (query)
708     gst_query_unref (query);
709 
710   return res;
711 
712   /* ERRORS */
713 no_decide_allocation:
714   {
715     GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation");
716     goto done;
717   }
718 }
719 
720 static gboolean
721 gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec)
722 {
723   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
724   gboolean ret = TRUE;
725 
726   if (G_LIKELY (klass->negotiate))
727     ret = klass->negotiate (dec);
728 
729   return ret;
730 }
731 
732 /**
733  * gst_audio_decoder_negotiate:
734  * @dec: a #GstAudioDecoder
735  *
736  * Negotiate with downstream elements to the currently configured #GstAudioInfo.
737  * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case. But marks it again if
738  * negotiation fails.
739  *
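 * As a sketch, a subclass that changed its output format mid-stream could
 * trigger renegotiation explicitly:
 *
 * |[<!-- language="C" -->
 * // info is assumed to describe the new output format
 * gst_audio_decoder_set_output_format (dec, &info);
 * if (!gst_audio_decoder_negotiate (dec))
 *   return GST_FLOW_NOT_NEGOTIATED;
 * ]|
 *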
740  * Returns: %TRUE if the negotiation succeeded, else %FALSE.
741  */
742 gboolean
743 gst_audio_decoder_negotiate (GstAudioDecoder * dec)
744 {
745   GstAudioDecoderClass *klass;
746   gboolean res = TRUE;
747 
748   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
749 
750   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
751 
752   GST_AUDIO_DECODER_STREAM_LOCK (dec);
753   gst_pad_check_reconfigure (dec->srcpad);
754   if (klass->negotiate) {
755     res = klass->negotiate (dec);
756     if (!res)
757       gst_pad_mark_reconfigure (dec->srcpad);
758   }
759   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
760 
761   return res;
762 }
763 
764 /**
765  * gst_audio_decoder_set_output_format:
766  * @dec: a #GstAudioDecoder
767  * @info: #GstAudioInfo
768  *
769  * Configure output info on the srcpad of @dec.
770  *
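 * A typical call site is the subclass' @set_format (or @handle_frame) once
 * the output format is known.  A minimal sketch, with an illustrative sample
 * rate and channel count:
 *
 * |[<!-- language="C" -->
 * GstAudioInfo info;
 *
 * gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 44100, 2, NULL);
 * if (!gst_audio_decoder_set_output_format (dec, &info))
 *   return FALSE;
 * ]|
 *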
771  * Returns: %TRUE on success.
772  **/
773 gboolean
774 gst_audio_decoder_set_output_format (GstAudioDecoder * dec,
775     const GstAudioInfo * info)
776 {
777   gboolean res = TRUE;
778   GstCaps *caps = NULL;
779 
780   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
781   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE);
782 
783   /* If the audio info can't be converted to caps,
784    * it was invalid */
785   caps = gst_audio_info_to_caps (info);
786   if (!caps) {
787     GST_WARNING_OBJECT (dec, "invalid output format");
788     return FALSE;
789   }
790 
791   res = gst_audio_decoder_set_output_caps (dec, caps);
792   gst_caps_unref (caps);
793 
794   return res;
795 }
796 
797 /**
798  * gst_audio_decoder_set_output_caps:
799  * @dec: a #GstAudioDecoder
800  * @caps: (transfer none): (fixed) #GstCaps
801  *
802  * Configure output caps on the srcpad of @dec. Similar to
803  * gst_audio_decoder_set_output_format(), but allows subclasses to specify
804  * output caps that can't be expressed via #GstAudioInfo e.g. caps that have
805  * caps features.
806  *
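 * As a sketch, a decoder producing memory with a custom caps feature (the
 * feature name is purely hypothetical, and the subclass' srcpad template is
 * assumed to advertise it) might do:
 *
 * |[<!-- language="C" -->
 * GstCaps *caps = gst_caps_new_simple ("audio/x-raw",
 *     "format", G_TYPE_STRING, "S16LE",
 *     "layout", G_TYPE_STRING, "interleaved",
 *     "rate", G_TYPE_INT, 48000,
 *     "channels", G_TYPE_INT, 2, NULL);
 *
 * gst_caps_set_features (caps, 0,
 *     gst_caps_features_new ("memory:MyCustomMemory", NULL));
 *
 * gboolean res = gst_audio_decoder_set_output_caps (dec, caps);
 * gst_caps_unref (caps);
 * ]|
 *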
807  * Returns: %TRUE on success.
808  *
809  * Since: 1.16
810  **/
811 gboolean
812 gst_audio_decoder_set_output_caps (GstAudioDecoder * dec, GstCaps * caps)
813 {
814   gboolean res = TRUE;
815   guint old_rate;
816   GstCaps *templ_caps;
817   GstAudioInfo info;
818 
819   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
820 
821   GST_DEBUG_OBJECT (dec, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
822 
823   GST_AUDIO_DECODER_STREAM_LOCK (dec);
824 
825   if (!gst_caps_is_fixed (caps))
826     goto refuse_caps;
827 
828   /* check if caps can be parsed */
829   if (!gst_audio_info_from_caps (&info, caps))
830     goto refuse_caps;
831 
832   /* Only allow caps that are a subset of the template caps */
833   templ_caps = gst_pad_get_pad_template_caps (dec->srcpad);
834   if (!gst_caps_is_subset (caps, templ_caps)) {
835     GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT
836         " does not match template %" GST_PTR_FORMAT, caps, templ_caps);
837     gst_caps_unref (templ_caps);
838     goto refuse_caps;
839   }
840   gst_caps_unref (templ_caps);
841 
842   /* adjust ts tracking to new sample rate */
843   old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info);
844   if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) {
845     dec->priv->base_ts +=
846         GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate);
847     dec->priv->samples = 0;
848   }
849 
850   /* copy the GstAudioInfo */
851   GST_OBJECT_LOCK (dec);
852   dec->priv->ctx.info = info;
853   GST_OBJECT_UNLOCK (dec);
854 
855   gst_caps_replace (&dec->priv->ctx.caps, caps);
856   dec->priv->ctx.output_format_changed = TRUE;
857 
858 done:
859   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
860 
861   return res;
862 
863   /* ERRORS */
864 refuse_caps:
865   {
866     GST_WARNING_OBJECT (dec, "invalid output format");
867     res = FALSE;
868     goto done;
869   }
870 }
871 
872 static gboolean
873 gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
874 {
875   GstAudioDecoderClass *klass;
876   gboolean res = TRUE;
877 
878   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
879 
880   GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);
881 
882   GST_AUDIO_DECODER_STREAM_LOCK (dec);
883 
884   if (dec->priv->ctx.input_caps
885       && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) {
886     GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again");
887     goto done;
888   }
889 
890   /* NOTE pbutils only needed here */
891   /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
892 #if 0
893   if (!dec->priv->taglist)
894     dec->priv->taglist = gst_tag_list_new ();
895   dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist);
896   gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist,
897       GST_TAG_AUDIO_CODEC, caps);
898   dec->priv->taglist_changed = TRUE;
899 #endif
900 
901   if (klass->set_format)
902     res = klass->set_format (dec, caps);
903 
904   if (res)
905     gst_caps_replace (&dec->priv->ctx.input_caps, caps);
906 
907 done:
908   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
909 
910   return res;
911 }
912 
913 static void
914 gst_audio_decoder_setup (GstAudioDecoder * dec)
915 {
916   GstQuery *query;
917   gboolean res;
918 
919   /* check if we are in a live pipeline; if so, latency messing is a no-no */
920   query = gst_query_new_latency ();
921   res = gst_pad_peer_query (dec->sinkpad, query);
922   if (res) {
923     gst_query_parse_latency (query, &res, NULL, NULL);
924     res = !res;
925   }
926   gst_query_unref (query);
927 
928   /* normalize to bool */
929   dec->priv->agg = ! !res;
930 }
931 
932 static GstFlowReturn
933 gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf)
934 {
935   GstAudioDecoderClass *klass;
936   GstAudioDecoderPrivate *priv;
937   GstAudioDecoderContext *ctx;
938   GstFlowReturn ret = GST_FLOW_OK;
939   GstClockTime ts;
940 
941   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
942   priv = dec->priv;
943   ctx = &dec->priv->ctx;
944 
945   g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR);
946 
947   if (G_UNLIKELY (!buf)) {
948     g_assert_not_reached ();
949     return GST_FLOW_OK;
950   }
951 
952   ctx->had_output_data = TRUE;
953   ts = GST_BUFFER_TIMESTAMP (buf);
954 
955   GST_LOG_OBJECT (dec,
956       "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
957       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
958       GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
959       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
960 
961   /* clip buffer */
962   buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate,
963       ctx->info.bpf);
964   if (G_UNLIKELY (!buf)) {
965     GST_DEBUG_OBJECT (dec, "no data after clipping to segment");
966     /* only check and return EOS if upstream still
967      * in the same segment and interested as such */
968     if (dec->priv->in_out_segment_sync) {
969       if (dec->output_segment.rate >= 0) {
970         if (ts >= dec->output_segment.stop)
971           ret = GST_FLOW_EOS;
972       } else if (ts < dec->output_segment.start) {
973         ret = GST_FLOW_EOS;
974       }
975     }
976     goto exit;
977   }
978 
979   /* decorate */
980   if (G_UNLIKELY (priv->discont)) {
981     GST_LOG_OBJECT (dec, "marking discont");
982     GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
983     priv->discont = FALSE;
984   }
985 
986   /* track where we are */
987   if (G_LIKELY (GST_BUFFER_TIMESTAMP_IS_VALID (buf))) {
988     /* duration should always be valid for raw audio */
989     g_assert (GST_BUFFER_DURATION_IS_VALID (buf));
990     dec->output_segment.position =
991         GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
992   }
993 
994   if (klass->pre_push) {
995     /* last chance for subclass to do some dirty stuff */
996     ret = klass->pre_push (dec, &buf);
997     if (ret != GST_FLOW_OK || !buf) {
998       GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p",
999           gst_flow_get_name (ret), buf);
1000       if (buf)
1001         gst_buffer_unref (buf);
1002       goto exit;
1003     }
1004   }
1005 
1006   GST_LOG_OBJECT (dec,
1007       "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1008       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1009       GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1010       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1011 
1012   ret = gst_pad_push (dec->srcpad, buf);
1013 
1014 exit:
1015   return ret;
1016 }
1017 
1018 /* mini aggregator combining output buffers into fewer larger ones,
1019  * if so allowed/configured */
1020 static GstFlowReturn
1021 gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf)
1022 {
1023   GstAudioDecoderPrivate *priv;
1024   GstFlowReturn ret = GST_FLOW_OK;
1025   GstBuffer *inbuf = NULL;
1026 
1027   priv = dec->priv;
1028 
1029   if (G_UNLIKELY (priv->agg < 0))
1030     gst_audio_decoder_setup (dec);
1031 
1032   if (G_LIKELY (buf)) {
1033     GST_LOG_OBJECT (dec,
1034         "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1035         ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1036         GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1037         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1038   }
1039 
1040 again:
1041   inbuf = NULL;
1042   if (priv->agg && dec->priv->latency > 0 &&
1043       priv->ctx.info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1044     gint av;
1045     gboolean assemble = FALSE;
1046     const GstClockTimeDiff tol = 10 * GST_MSECOND;
1047     GstClockTimeDiff diff = -100 * GST_MSECOND;
1048 
1049     av = gst_adapter_available (priv->adapter_out);
1050     if (G_UNLIKELY (!buf)) {
1051       /* forcibly send current */
1052       assemble = TRUE;
1053       GST_LOG_OBJECT (dec, "forcing fragment flush");
1054     } else if (av && (!GST_BUFFER_TIMESTAMP_IS_VALID (buf) ||
1055             !GST_CLOCK_TIME_IS_VALID (priv->out_ts) ||
1056             ((diff = GST_CLOCK_DIFF (GST_BUFFER_TIMESTAMP (buf),
1057                         priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) {
1058       assemble = TRUE;
1059       GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment",
1060           (gint) (diff / GST_MSECOND));
1061     } else {
1062       /* add or start collecting */
1063       if (!av) {
1064         GST_LOG_OBJECT (dec, "starting new fragment");
1065         priv->out_ts = GST_BUFFER_TIMESTAMP (buf);
1066       } else {
1067         GST_LOG_OBJECT (dec, "adding to fragment");
1068       }
1069       gst_adapter_push (priv->adapter_out, buf);
1070       priv->out_dur += GST_BUFFER_DURATION (buf);
1071       av += gst_buffer_get_size (buf);
1072       buf = NULL;
1073     }
1074     if (priv->out_dur > dec->priv->latency)
1075       assemble = TRUE;
1076     if (av && assemble) {
1077       GST_LOG_OBJECT (dec, "assembling fragment");
1078       inbuf = buf;
1079       buf = gst_adapter_take_buffer (priv->adapter_out, av);
1080       GST_BUFFER_TIMESTAMP (buf) = priv->out_ts;
1081       GST_BUFFER_DURATION (buf) = priv->out_dur;
1082       priv->out_ts = GST_CLOCK_TIME_NONE;
1083       priv->out_dur = 0;
1084     }
1085   }
1086 
1087   if (G_LIKELY (buf)) {
1088     if (dec->output_segment.rate > 0.0) {
1089       ret = gst_audio_decoder_push_forward (dec, buf);
1090       GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret));
1091     } else {
1092       ret = GST_FLOW_OK;
1093       priv->queued = g_list_prepend (priv->queued, buf);
1094       GST_LOG_OBJECT (dec, "buffer queued");
1095     }
1096 
1097     if (inbuf) {
1098       buf = inbuf;
1099       goto again;
1100     }
1101   }
1102 
1103   return ret;
1104 }
1105 
1106 static void
1107 send_pending_events (GstAudioDecoder * dec)
1108 {
1109   GstAudioDecoderPrivate *priv = dec->priv;
1110   GList *pending_events, *l;
1111 
1112   pending_events = priv->pending_events;
1113   priv->pending_events = NULL;
1114 
1115   GST_DEBUG_OBJECT (dec, "Pushing pending events");
1116   for (l = pending_events; l; l = l->next)
1117     gst_audio_decoder_push_event (dec, l->data);
1118   g_list_free (pending_events);
1119 }
1120 
1121 /* Iterate the list of pending events, and ensure
1122  * the current output segment is up to date for
1123  * decoding */
1124 static void
1125 apply_pending_events (GstAudioDecoder * dec)
1126 {
1127   GstAudioDecoderPrivate *priv = dec->priv;
1128   GList *l;
1129 
1130   GST_DEBUG_OBJECT (dec, "Applying pending segments");
1131   for (l = priv->pending_events; l; l = l->next) {
1132     GstEvent *event = GST_EVENT (l->data);
1133     switch (GST_EVENT_TYPE (event)) {
1134       case GST_EVENT_SEGMENT:{
1135         GstSegment seg;
1136 
1137         GST_AUDIO_DECODER_STREAM_LOCK (dec);
1138         gst_event_copy_segment (event, &seg);
1139 
1140         GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
1141 
1142         dec->output_segment = seg;
1143         dec->priv->in_out_segment_sync =
1144             gst_segment_is_equal (&dec->input_segment, &seg);
1145         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1146         break;
1147       }
1148       default:
1149         break;
1150     }
1151   }
1152 }
1153 
1154 static GstFlowReturn
1155 check_pending_reconfigure (GstAudioDecoder * dec)
1156 {
1157   GstFlowReturn ret = GST_FLOW_OK;
1158   GstAudioDecoderContext *ctx;
1159   gboolean needs_reconfigure;
1160 
1161   ctx = &dec->priv->ctx;
1162 
1163   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
1164   if (G_UNLIKELY (ctx->output_format_changed ||
1165           (GST_AUDIO_INFO_IS_VALID (&ctx->info)
1166               && needs_reconfigure))) {
1167     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
1168       gst_pad_mark_reconfigure (dec->srcpad);
1169       if (GST_PAD_IS_FLUSHING (dec->srcpad))
1170         ret = GST_FLOW_FLUSHING;
1171       else
1172         ret = GST_FLOW_NOT_NEGOTIATED;
1173     }
1174   }
1175   return ret;
1176 }
1177 
1178 static gboolean
1179 gst_audio_decoder_transform_meta_default (GstAudioDecoder *
1180     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
1181 {
1182   const GstMetaInfo *info = meta->info;
1183   const gchar *const *tags;
1184 
1185   tags = gst_meta_api_type_get_tags (info->api);
1186 
1187   if (!tags || (g_strv_length ((gchar **) tags) == 1
1188           && gst_meta_api_type_has_tag (info->api,
1189               g_quark_from_string (GST_META_TAG_AUDIO_STR))))
1190     return TRUE;
1191 
1192   return FALSE;
1193 }
1194 
1195 typedef struct
1196 {
1197   GstAudioDecoder *decoder;
1198   GstBuffer *outbuf;
1199 } CopyMetaData;
1200 
1201 static gboolean
1202 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
1203 {
1204   CopyMetaData *data = user_data;
1205   GstAudioDecoder *decoder = data->decoder;
1206   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
1207   GstBuffer *outbuf = data->outbuf;
1208   const GstMetaInfo *info = (*meta)->info;
1209   gboolean do_copy = FALSE;
1210 
1211   if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
1212     /* never call the transform_meta with memory specific metadata */
1213     GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
1214         g_type_name (info->api));
1215     do_copy = FALSE;
1216   } else if (klass->transform_meta) {
1217     do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf);
1218     GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
1219         g_type_name (info->api), do_copy);
1220   }
1221 
1222   /* we only copy metadata when the subclass implemented a transform_meta
1223    * function and when it returns %TRUE */
1224   if (do_copy && info->transform_func) {
1225     GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
1226     GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
1227     /* simply copy then */
1228     info->transform_func (outbuf, *meta, inbuf,
1229         _gst_meta_transform_copy, &copy_data);
1230   }
1231   return TRUE;
1232 }
1233 
1234 /**
1235  * gst_audio_decoder_finish_subframe:
1236  * @dec: a #GstAudioDecoder
1237  * @buf: decoded data
1238  *
1239  * Collects decoded data and pushes it downstream. This function may be called
1240  * multiple times for a given input frame.
1241  *
1242  * @buf may be NULL in which case it is assumed that the current input frame is
1243  * finished. This is equivalent to calling gst_audio_decoder_finish_frame()
1244  * with a NULL buffer and frames=1 after having pushed out all decoded audio
1245  * subframes using this function.
1246  *
1247  * When called with valid data in @buf the source pad caps must have been set
1248  * already.
1249  *
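 * A hypothetical subclass splitting one input frame into several output
 * chunks could use it roughly as follows (my_dec_decode_chunk() is an
 * invented, codec specific helper):
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * inbuf)
 * {
 *   GstFlowReturn ret = GST_FLOW_OK;
 *   GstBuffer *chunk;
 *
 *   // push every decoded chunk of this input frame as a subframe
 *   while (ret == GST_FLOW_OK && (chunk = my_dec_decode_chunk (dec, inbuf)))
 *     ret = gst_audio_decoder_finish_subframe (dec, chunk);
 *
 *   // then mark the input frame as completely handled
 *   if (ret == GST_FLOW_OK)
 *     ret = gst_audio_decoder_finish_subframe (dec, NULL);
 *   return ret;
 * }
 * ]|
 *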
1250  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1251  * invalidated by a call to this function.
1252  *
1253  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1254  *
1255  * Since: 1.16
1256  */
1257 GstFlowReturn
1258 gst_audio_decoder_finish_subframe (GstAudioDecoder * dec, GstBuffer * buf)
1259 {
1260   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1261 
1262   if (buf == NULL)
1263     return gst_audio_decoder_finish_frame_or_subframe (dec, NULL, 1);
1264   else
1265     return gst_audio_decoder_finish_frame_or_subframe (dec, buf, 0);
1266 }
1267 
1268 /**
1269  * gst_audio_decoder_finish_frame:
1270  * @dec: a #GstAudioDecoder
1271  * @buf: decoded data
1272  * @frames: number of decoded frames represented by decoded data
1273  *
1274  * Collects decoded data and pushes it downstream.
1275  *
1276  * @buf may be NULL in which case the indicated number of frames
1277  * are discarded and considered to have produced no output
1278  * (e.g. lead-in or setup frames).
1279  * Otherwise, source pad caps must be set when it is called with valid
1280  * data in @buf.
1281  *
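 * For example, a subclass that recognizes a frame as a codec setup header
 * producing no audio might, as a sketch, drop it with:
 *
 * |[<!-- language="C" -->
 * // consume one pending input frame without producing any output
 * return gst_audio_decoder_finish_frame (dec, NULL, 1);
 * ]|
 *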
1282  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1283  * invalidated by a call to this function.
1284  *
1285  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1286  */
1287 GstFlowReturn
1288 gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
1289     gint frames)
1290 {
1291   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1292 
1293   /* no dummy calls please */
1294   g_return_val_if_fail (frames != 0, GST_FLOW_ERROR);
1295 
1296   return gst_audio_decoder_finish_frame_or_subframe (dec, buf, frames);
1297 }
1298 
1299 /* frames == 0 indicates that this is a sub-frame and further sub-frames may
1300  * follow for the current input frame. */
1301 static GstFlowReturn
1302 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
1303     GstBuffer * buf, gint frames)
1304 {
1305   GstAudioDecoderPrivate *priv;
1306   GstAudioDecoderContext *ctx;
1307   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1308   GstAudioMeta *meta;
1309   GstClockTime ts, next_ts;
1310   gsize size, samples = 0;
1311   GstFlowReturn ret = GST_FLOW_OK;
1312   GQueue inbufs = G_QUEUE_INIT;
1313   gboolean is_subframe = (frames == 0);
1314   gboolean do_check_resync;
1315 
1316   /* subclass should not hand us no data */
1317   g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
1318       GST_FLOW_ERROR);
1319 
1320   /* if it's a subframe (frames == 0) we must have a valid buffer */
1321   g_assert (!is_subframe || buf != NULL);
1322 
1323   priv = dec->priv;
1324   ctx = &dec->priv->ctx;
1325   meta = buf ? gst_buffer_get_audio_meta (buf) : NULL;
1326   size = buf ? gst_buffer_get_size (buf) : 0;
1327   samples = buf ? (meta ? meta->samples : size / ctx->info.bpf) : 0;
1328 
1329   /* must know the output format by now */
1330   g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info),
1331       GST_FLOW_ERROR);
1332 
1333   GST_LOG_OBJECT (dec,
1334       "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT
1335       " samples for %d frames", buf ? size : 0, samples, frames);
1336 
1337   GST_AUDIO_DECODER_STREAM_LOCK (dec);
1338 
1339   if (buf != NULL && priv->subframe_samples == 0) {
1340     ret = check_pending_reconfigure (dec);
1341     if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) {
1342       gst_buffer_unref (buf);
1343       goto exit;
1344     }
1345 
1346     if (priv->pending_events)
1347       send_pending_events (dec);
1348   }
1349 
1350   /* sanity checking */
1351   if (G_LIKELY (buf && ctx->info.bpf)) {
1352     if (!meta || meta->info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1353       /* output should be a whole number of sample frames */
1354       if (size % ctx->info.bpf)
1355         goto wrong_buffer;
1356       /* output should have no additional padding */
1357       if (samples != size / ctx->info.bpf)
1358         goto wrong_samples;
1359     } else {
1360       /* can't have more samples than fit in the buffer */
1361       if (samples > size / ctx->info.bpf)
1362         goto wrong_samples;
1363     }
1364   }
1365 
1366   /* frame and ts book-keeping */
1367   if (G_UNLIKELY (frames < 0)) {
1368     if (G_UNLIKELY (-frames - 1 > priv->frames.length)) {
1369       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1370           ("received more decoded frames %d than provided %d", frames,
1371               priv->frames.length), (NULL));
1372       frames = 0;
1373     } else {
1374       frames = priv->frames.length + frames + 1;
1375     }
1376   } else if (G_UNLIKELY (frames > priv->frames.length)) {
1377     if (G_LIKELY (!priv->force)) {
1378       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1379           ("received more decoded frames %d than provided %d", frames,
1380               priv->frames.length), (NULL));
1381     }
1382     frames = priv->frames.length;
1383   }
1384 
1385   if (G_LIKELY (priv->frames.length))
1386     ts = GST_BUFFER_TIMESTAMP (priv->frames.head->data);
1387   else
1388     ts = GST_CLOCK_TIME_NONE;
1389 
1390   GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT,
1391       GST_TIME_ARGS (ts));
1392 
1393   if (is_subframe && priv->frames.length == 0)
1394     goto subframe_without_pending_input_frame;
1395 
1396   /* this will be skipped in the is_subframe case because frames will be 0 */
1397   while (priv->frames.length && frames) {
1398     g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames));
1399     dec->priv->ctx.delay = dec->priv->frames.length;
1400     frames--;
1401   }
1402 
1403   if (G_UNLIKELY (!buf))
1404     goto exit;
1405 
1406   /* lock on */
1407   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1408     priv->base_ts = ts;
1409     GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));
1410   }
1411 
1412   /* still no valid ts, track the segment one */
1413   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) &&
1414       dec->output_segment.rate > 0.0) {
1415     priv->base_ts = dec->output_segment.start;
1416   }
1417 
1418   /* only check for resync at the beginning of an input/output frame */
1419   do_check_resync = !is_subframe || priv->subframe_samples == 0;
1420 
1421   /* slightly convoluted approach caters for perfect ts if subclass desires. */
1422   if (do_check_resync && GST_CLOCK_TIME_IS_VALID (ts)) {
1423     if (dec->priv->tolerance > 0) {
1424       GstClockTimeDiff diff;
1425 
1426       g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts));
1427       next_ts = priv->base_ts +
1428           gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
1429       GST_LOG_OBJECT (dec,
1430           "buffer is %" G_GUINT64_FORMAT " samples past base_ts %"
1431           GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples,
1432           GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1433       diff = GST_CLOCK_DIFF (next_ts, ts);
1434       GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1435       /* if within tolerance,
1436        * discard buffer ts and carry on producing perfect stream,
1437        * otherwise resync to ts */
1438       if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance ||
1439               diff > (gint64) dec->priv->tolerance)) {
1440         GST_DEBUG_OBJECT (dec, "base_ts resync");
1441         priv->base_ts = ts;
1442         priv->samples = 0;
1443       }
1444     } else {
1445       GST_DEBUG_OBJECT (dec, "base_ts resync");
1446       priv->base_ts = ts;
1447       priv->samples = 0;
1448     }
1449   }
1450 
1451   /* delayed one-shot stuff until confirmed data */
1452   if (priv->taglist && priv->taglist_changed) {
1453     GstEvent *tags_event;
1454 
1455     tags_event = gst_audio_decoder_create_merged_tags_event (dec);
1456 
1457     if (tags_event != NULL)
1458       gst_audio_decoder_push_event (dec, tags_event);
1459 
1460     priv->taglist_changed = FALSE;
1461   }
1462 
1463   buf = gst_buffer_make_writable (buf);
1464   if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1465     GST_BUFFER_TIMESTAMP (buf) =
1466         priv->base_ts +
1467         GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->info.rate);
1468     GST_BUFFER_DURATION (buf) = priv->base_ts +
1469         GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->info.rate) -
1470         GST_BUFFER_TIMESTAMP (buf);
1471   } else {
1472     GST_BUFFER_TIMESTAMP (buf) = GST_CLOCK_TIME_NONE;
1473     GST_BUFFER_DURATION (buf) =
1474         GST_FRAMES_TO_CLOCK_TIME (samples, ctx->info.rate);
1475   }
1476 
1477   if (klass->transform_meta) {
1478     if (inbufs.length) {
1479       GList *l;
1480       for (l = inbufs.head; l; l = l->next) {
1481         CopyMetaData data;
1482 
1483         data.decoder = dec;
1484         data.outbuf = buf;
1485         gst_buffer_foreach_meta (l->data, foreach_metadata, &data);
1486       }
1487     } else if (is_subframe) {
1488       CopyMetaData data;
1489       GstBuffer *in_buf;
1490 
1491       /* For subframes we assume a 1:N relationship for now, so we just take
1492        * metas from the first pending input buf */
1493       in_buf = g_queue_peek_head (&priv->frames);
1494       data.decoder = dec;
1495       data.outbuf = buf;
1496       gst_buffer_foreach_meta (in_buf, foreach_metadata, &data);
1497     } else {
1498       GST_WARNING_OBJECT (dec,
1499           "Can't copy metadata because input buffers disappeared");
1500     }
1501   }
1502 
1503   GST_OBJECT_LOCK (dec);
1504   priv->samples += samples;
1505   priv->samples_out += samples;
1506   GST_OBJECT_UNLOCK (dec);
1507 
1508   /* we got data, so note things are looking up */
1509   if (G_UNLIKELY (dec->priv->error_count))
1510     dec->priv->error_count = 0;
1511 
1512   ret = gst_audio_decoder_output (dec, buf);
1513 
1514 exit:
1515   g_queue_foreach (&inbufs, (GFunc) gst_buffer_unref, NULL);
1516   g_queue_clear (&inbufs);
1517 
1518   if (is_subframe)
1519     dec->priv->subframe_samples += samples;
1520   else
1521     dec->priv->subframe_samples = 0;
1522 
1523   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1524 
1525   return ret;
1526 
1527   /* ERRORS */
1528 wrong_buffer:
1529   {
1530     /* arguably more of a programming error? */
1531     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1532         ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d", size,
1533             ctx->info.bpf));
1534     gst_buffer_unref (buf);
1535     ret = GST_FLOW_ERROR;
1536     goto exit;
1537   }
1538 wrong_samples:
1539   {
1540     /* arguably more of a programming error? */
1541     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1542         ("GstAudioMeta samples (%" G_GSIZE_FORMAT ") are inconsistent with "
1543             "the buffer size and layout (size/bpf = %" G_GSIZE_FORMAT ")",
1544             meta->samples, size / ctx->info.bpf));
1545     gst_buffer_unref (buf);
1546     ret = GST_FLOW_ERROR;
1547     goto exit;
1548   }
1549 subframe_without_pending_input_frame:
1550   {
1551     /* arguably more of a programming error? */
1552     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1553         ("Received decoded subframe, but no pending frame"));
1554     gst_buffer_unref (buf);
1555     ret = GST_FLOW_ERROR;
1556     goto exit;
1557   }
1558 }
1559 
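/* hand a parsed frame (or NULL to drain) to the subclass' ::handle_frame,
 * while keeping some administration (frame queue, delay, input byte count);
 * in TRICKMODE_NO_AUDIO forward playback, decoding is skipped and the
 * buffer is turned into a GAP event instead */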
1560 static GstFlowReturn
1561 gst_audio_decoder_handle_frame (GstAudioDecoder * dec,
1562     GstAudioDecoderClass * klass, GstBuffer * buffer)
1563 {
1564   /* Skip decoding and send a GAP instead if
1565    * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO is set and we have timestamps
1566    * FIXME: We only do this for forward playback atm, because reverse
1567    * playback would require accumulating GAP events and pushing them
1568    * out in reverse order as for normal audio samples
1569    */
1570   if (G_UNLIKELY (dec->input_segment.rate > 0.0
1571           && dec->input_segment.flags & GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO)) {
1572     if (buffer) {
1573       GstClockTime ts = GST_BUFFER_PTS (buffer);
1574       if (GST_CLOCK_TIME_IS_VALID (ts)) {
1575         GstEvent *event = gst_event_new_gap (ts, GST_BUFFER_DURATION (buffer));
1576 
1577         gst_buffer_unref (buffer);
1578         GST_LOG_OBJECT (dec, "Skipping decode in trickmode and sending gap");
1579         gst_audio_decoder_handle_gap (dec, event);
1580         return GST_FLOW_OK;
1581       }
1582     }
1583   }
1584 
1585   if (G_LIKELY (buffer)) {
1586     gsize size = gst_buffer_get_size (buffer);
1587     /* keep around for admin */
1588     GST_LOG_OBJECT (dec,
1589         "tracking frame size %" G_GSIZE_FORMAT ", ts %" GST_TIME_FORMAT, size,
1590         GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
1591     g_queue_push_tail (&dec->priv->frames, buffer);
1592     dec->priv->ctx.delay = dec->priv->frames.length;
1593     GST_OBJECT_LOCK (dec);
1594     dec->priv->bytes_in += size;
1595     GST_OBJECT_UNLOCK (dec);
1596   } else {
1597     GST_LOG_OBJECT (dec, "providing subclass with NULL frame");
1598   }
1599 
1600   return klass->handle_frame (dec, buffer);
1601 }
1602 
1603 /* maybe this should be subclass-configurable instead, but this allows for a
1604  * whole lot of raw samples, so at least quite some encoded data ... */
1605 #define GST_AUDIO_DECODER_MAX_SYNC     10 * 8 * 2 * 1024
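/* i.e. give up (and error out in _push_buffers below) once more than
 * 163840 bytes (= 10 * 8 * 2 * 1024) have been skipped while resyncing */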
1606 
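/* gather data from the adapter and feed it to the subclass frame by frame;
 * if a ::parse vfunc is provided it is used to locate frame boundaries
 * (skipping junk and tracking sync state), otherwise all available data is
 * handed over at once; when forced (draining), a NULL frame may be passed */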
1607 static GstFlowReturn
1608 gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force)
1609 {
1610   GstAudioDecoderClass *klass;
1611   GstAudioDecoderPrivate *priv;
1612   GstAudioDecoderContext *ctx;
1613   GstFlowReturn ret = GST_FLOW_OK;
1614   GstBuffer *buffer;
1615   gint av, flush;
1616 
1617   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1618   priv = dec->priv;
1619   ctx = &dec->priv->ctx;
1620 
1621   g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1622 
1623   av = gst_adapter_available (priv->adapter);
1624   GST_DEBUG_OBJECT (dec, "available: %d", av);
1625 
1626   while (ret == GST_FLOW_OK) {
1627 
1628     flush = 0;
1629     ctx->eos = force;
1630 
1631     if (G_LIKELY (av)) {
1632       gint len;
1633       GstClockTime ts;
1634       guint64 distance;
1635 
1636       /* parse if needed */
1637       if (klass->parse) {
1638         gint offset = 0;
1639 
1640         /* limited (legacy) parsing; avoid whole of baseparse */
1641         GST_DEBUG_OBJECT (dec, "parsing available: %d", av);
1642         /* piggyback sync state on discont */
1643         ctx->sync = !priv->discont;
1644         ret = klass->parse (dec, priv->adapter, &offset, &len);
1645 
1646         g_assert (offset <= av);
1647         if (offset) {
1648           /* jumped a bit */
1649           GST_DEBUG_OBJECT (dec, "skipped %d; setting DISCONT", offset);
1650           gst_adapter_flush (priv->adapter, offset);
1651           flush = offset;
1652           /* avoid parsing indefinitely */
1653           priv->sync_flush += offset;
1654           if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC)
1655             goto parse_failed;
1656         }
1657 
1658         if (ret == GST_FLOW_EOS) {
1659           GST_LOG_OBJECT (dec, "no frame yet");
1660           ret = GST_FLOW_OK;
1661           break;
1662         } else if (ret == GST_FLOW_OK) {
1663           GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len);
1664           g_assert (len);
1665           g_assert (offset + len <= av);
1666           priv->sync_flush = 0;
1667         } else {
1668           break;
1669         }
1670       } else {
1671         len = av;
1672       }
1673       /* track upstream ts, but do not get stuck if nothing new upstream */
1674       ts = gst_adapter_prev_pts (priv->adapter, &distance);
1675       if (ts != priv->prev_ts || distance <= priv->prev_distance) {
1676         priv->prev_ts = ts;
1677         priv->prev_distance = distance;
1678       } else {
1679         GST_LOG_OBJECT (dec, "ts == prev_ts; discarding");
1680         ts = GST_CLOCK_TIME_NONE;
1681       }
1682       buffer = gst_adapter_take_buffer (priv->adapter, len);
1683       buffer = gst_buffer_make_writable (buffer);
1684       GST_BUFFER_TIMESTAMP (buffer) = ts;
1685       flush += len;
1686       priv->force = FALSE;
1687     } else {
1688       if (!force)
1689         break;
1690       if (!priv->drainable) {
1691         priv->drained = TRUE;
1692         break;
1693       }
1694       buffer = NULL;
1695       priv->force = TRUE;
1696     }
1697 
1698     ret = gst_audio_decoder_handle_frame (dec, klass, buffer);
1699 
1700     /* do not keep pushing it ... */
1701     if (G_UNLIKELY (!av)) {
1702       priv->drained = TRUE;
1703       break;
1704     }
1705 
1706     av -= flush;
1707     g_assert (av >= 0);
1708   }
1709 
1710   GST_LOG_OBJECT (dec, "done pushing to subclass");
1711   return ret;
1712 
1713   /* ERRORS */
1714 parse_failed:
1715   {
1716     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream"));
1717     return GST_FLOW_ERROR;
1718   }
1719 }
1720 
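/* make sure everything that is pending gets pushed out: apply pending events,
 * dispatch any reverse-playback gather list, have the subclass output what it
 * still can, and flush remaining queued output downstream */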
1721 static GstFlowReturn
1722 gst_audio_decoder_drain (GstAudioDecoder * dec)
1723 {
1724   GstFlowReturn ret;
1725 
1726   if (dec->priv->drained && !dec->priv->gather)
1727     return GST_FLOW_OK;
1728 
1729   /* Apply any pending events before draining, as that
1730    * may update the pending segment info */
1731   apply_pending_events (dec);
1732 
1733   /* dispatch reverse pending buffers */
1734   /* chain eventually calls upon drain as well, but by that time
1735    * gather list should be clear, so ok ... */
1736   if (dec->output_segment.rate < 0.0 && dec->priv->gather)
1737     gst_audio_decoder_chain_reverse (dec, NULL);
1738   /* have subclass give all it can */
1739   ret = gst_audio_decoder_push_buffers (dec, TRUE);
1740   if (ret != GST_FLOW_OK) {
1741     GST_WARNING_OBJECT (dec, "audio decoder push buffers failed");
1742     goto drain_failed;
1743   }
1744   /* ensure all output sent */
1745   ret = gst_audio_decoder_output (dec, NULL);
1746   if (ret != GST_FLOW_OK)
1747     GST_WARNING_OBJECT (dec, "audio decoder output failed");
1748 
1749 drain_failed:
1750   /* everything should be away now */
1751   if (dec->priv->frames.length) {
1752     /* not fatal/impossible though if subclass/codec eats stuff */
1753     GST_WARNING_OBJECT (dec, "still %d frames left after draining",
1754         dec->priv->frames.length);
1755     g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
1756     g_queue_clear (&dec->priv->frames);
1757   }
1758 
1759   /* discard (unparsed) leftover */
1760   gst_adapter_clear (dec->priv->adapter);
1761   return ret;
1762 }
1763 
1764 /* hard == FLUSH, otherwise discont */
1765 static GstFlowReturn
1766 gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard)
1767 {
1768   GstAudioDecoderClass *klass;
1769   GstFlowReturn ret = GST_FLOW_OK;
1770 
1771   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1772 
1773   GST_LOG_OBJECT (dec, "flush hard %d", hard);
1774 
1775   if (!hard) {
1776     ret = gst_audio_decoder_drain (dec);
1777   } else {
1778     gst_audio_decoder_clear_queues (dec);
1779     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
1780     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
1781     dec->priv->error_count = 0;
1782   }
1783   /* only bother subclass with flushing if known it is already alive
1784    * and kicking out stuff */
1785   if (klass->flush && dec->priv->samples_out > 0)
1786     klass->flush (dec, hard);
1787   /* and get (re)set for the sequel */
1788   gst_audio_decoder_reset (dec, FALSE);
1789 
1790   return ret;
1791 }
1792 
1793 static GstFlowReturn
1794 gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer)
1795 {
1796   GstFlowReturn ret = GST_FLOW_OK;
1797 
1798   /* discard silly case, though maybe ts may be of value ?? */
1799   if (G_UNLIKELY (gst_buffer_get_size (buffer) == 0)) {
1800     GST_DEBUG_OBJECT (dec, "discarding empty buffer");
1801     gst_buffer_unref (buffer);
1802     goto exit;
1803   }
1804 
1805   /* grab buffer */
1806   gst_adapter_push (dec->priv->adapter, buffer);
1807   buffer = NULL;
1808   /* new stuff, so we can push subclass again */
1809   dec->priv->drained = FALSE;
1810 
1811   /* hand to subclass */
1812   ret = gst_audio_decoder_push_buffers (dec, FALSE);
1813 
1814 exit:
1815   GST_LOG_OBJECT (dec, "chain-done");
1816   return ret;
1817 }
1818 
1819 static void
1820 gst_audio_decoder_clear_queues (GstAudioDecoder * dec)
1821 {
1822   GstAudioDecoderPrivate *priv = dec->priv;
1823 
1824   g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL);
1825   g_list_free (priv->queued);
1826   priv->queued = NULL;
1827   g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL);
1828   g_list_free (priv->gather);
1829   priv->gather = NULL;
1830   g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL);
1831   g_list_free (priv->decode);
1832   priv->decode = NULL;
1833 }
1834 
1835 /*
1836  * Input:
1837  *  Buffer decoding order:  7  8  9  4  5  6  3  1  2  EOS
1838  *  Discont flag:           D        D        D  D
1839  *
1840  * - Each Discont marks a discont in the decoding order.
1841  *
1842  * for vorbis, each buffer is a keyframe when we have the previous
1843  * buffer. This means that to decode buffer 7, we need buffer 6, which
1844  * arrives out of order.
1845  *
1846  * We first gather buffers in the gather queue until we get a DISCONT. We
1847  * prepend each incoming buffer so that they are in reversed order.
1848  *
1849  *    gather queue:    9  8  7
1850  *    decode queue:
1851  *    output queue:
1852  *
1853  * When a DISCONT is received (buffer 4), we move the gather queue to the
1854  * decode queue. This is simply done by taking the head of the gather queue
1855  * and prepending it to the decode queue. This yields:
1856  *
1857  *    gather queue:
1858  *    decode queue:    7  8  9
1859  *    output queue:
1860  *
1861  * Then we decode each buffer in the decode queue in order and put the output
1862  * buffer in the output queue. The first buffer (7) will not produce any output
1863  * because it needs the previous buffer (6) which did not arrive yet. This
1864  * yields:
1865  *
1866  *    gather queue:
1867  *    decode queue:    7  8  9
1868  *    output queue:    9  8
1869  *
1870  * Then we remove the consumed buffers from the decode queue. Buffer 7 is not
1871  * completely consumed, we need to keep it around for when we receive buffer
1872  * 6. This yields:
1873  *
1874  *    gather queue:
1875  *    decode queue:    7
1876  *    output queue:    9  8
1877  *
1878  * Then we accumulate more buffers:
1879  *
1880  *    gather queue:    6  5  4
1881  *    decode queue:    7
1882  *    output queue:
1883  *
1884  * prepending to the decode queue on DISCONT yields:
1885  *
1886  *    gather queue:
1887  *    decode queue:    4  5  6  7
1888  *    output queue:
1889  *
1890  * after decoding and keeping buffer 4:
1891  *
1892  *    gather queue:
1893  *    decode queue:    4
1894  *    output queue:    7  6  5
1895  *
1896  * Etc..
1897  */
1898 static GstFlowReturn
1899 gst_audio_decoder_flush_decode (GstAudioDecoder * dec)
1900 {
1901   GstAudioDecoderPrivate *priv = dec->priv;
1902   GstFlowReturn res = GST_FLOW_OK;
1903   GstClockTime timestamp;
1904   GList *walk;
1905 
1906   walk = priv->decode;
1907 
1908   GST_DEBUG_OBJECT (dec, "flushing buffers to decoder");
1909 
1910   /* clear buffer and decoder state */
1911   gst_audio_decoder_flush (dec, FALSE);
1912 
1913   while (walk) {
1914     GList *next;
1915     GstBuffer *buf = GST_BUFFER_CAST (walk->data);
1916 
1917     GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT,
1918         buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
1919 
1920     next = g_list_next (walk);
1921     /* decode buffer, resulting data prepended to output queue */
1922     gst_buffer_ref (buf);
1923     res = gst_audio_decoder_chain_forward (dec, buf);
1924 
1925     /* if we generated output, we can discard the buffer, else we
1926      * keep it in the queue */
1927     if (priv->queued) {
1928       GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data);
1929       priv->decode = g_list_delete_link (priv->decode, walk);
1930       gst_buffer_unref (buf);
1931     } else {
1932       GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
1933     }
1934     walk = next;
1935   }
1936 
1937   /* drain any aggregation (or otherwise) leftover */
1938   gst_audio_decoder_drain (dec);
1939 
1940   /* now send queued data downstream */
1941   timestamp = GST_CLOCK_TIME_NONE;
1942   while (priv->queued) {
1943     GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
1944     GstClockTime duration;
1945 
1946     duration = GST_BUFFER_DURATION (buf);
1947 
1948     /* duration should always be valid for raw audio */
1949     g_assert (GST_CLOCK_TIME_IS_VALID (duration));
1950 
1951     /* interpolate (backward) if needed */
1952     if (G_LIKELY (timestamp != -1)) {
1953       if (timestamp > duration)
1954         timestamp -= duration;
1955       else
1956         timestamp = 0;
1957     }
1958 
1959     if (!GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
1960       GST_LOG_OBJECT (dec, "applying reverse interpolated ts %"
1961           GST_TIME_FORMAT, GST_TIME_ARGS (timestamp));
1962       GST_BUFFER_TIMESTAMP (buf) = timestamp;
1963     } else {
1964       /* track otherwise */
1965       timestamp = GST_BUFFER_TIMESTAMP (buf);
1966       GST_LOG_OBJECT (dec, "tracking ts %" GST_TIME_FORMAT,
1967           GST_TIME_ARGS (timestamp));
1968     }
1969 
1970     if (G_LIKELY (res == GST_FLOW_OK)) {
1971       GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
1972           "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
1973           gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1974           GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1975       /* should be already, but let's be sure */
1976       buf = gst_buffer_make_writable (buf);
1977       /* avoid stray DISCONT from forward processing,
1978        * which have no meaning in reverse pushing */
1979       GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
1980       res = gst_audio_decoder_push_forward (dec, buf);
1981     } else {
1982       gst_buffer_unref (buf);
1983     }
1984 
1985     priv->queued = g_list_delete_link (priv->queued, priv->queued);
1986   }
1987 
1988   return res;
1989 }
1990 
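/* reverse playback: incoming buffers are prepended to the gather list; on
 * DISCONT (or NULL buffer) the gathered batch is moved to the decode list
 * and decoded via gst_audio_decoder_flush_decode () */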
1991 static GstFlowReturn
1992 gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf)
1993 {
1994   GstAudioDecoderPrivate *priv = dec->priv;
1995   GstFlowReturn result = GST_FLOW_OK;
1996 
1997   /* if we have a discont, move buffers to the decode list */
1998   if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) {
1999     GST_DEBUG_OBJECT (dec, "received discont");
2000     while (priv->gather) {
2001       GstBuffer *gbuf;
2002 
2003       gbuf = GST_BUFFER_CAST (priv->gather->data);
2004       /* remove from the gather list */
2005       priv->gather = g_list_delete_link (priv->gather, priv->gather);
2006       /* copy to decode queue */
2007       priv->decode = g_list_prepend (priv->decode, gbuf);
2008     }
2009     /* decode stuff in the decode queue */
2010     gst_audio_decoder_flush_decode (dec);
2011   }
2012 
2013   if (G_LIKELY (buf)) {
2014     GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2015         "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2016         gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
2017         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2018 
2019     /* add buffer to gather queue */
2020     priv->gather = g_list_prepend (priv->gather, buf);
2021   }
2022 
2023   return result;
2024 }
2025 
2026 static GstFlowReturn
2027 gst_audio_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2028 {
2029   GstAudioDecoder *dec;
2030   GstFlowReturn ret;
2031 
2032   dec = GST_AUDIO_DECODER (parent);
2033 
2034   GST_LOG_OBJECT (dec,
2035       "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
2036       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buffer),
2037       GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
2038       GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
2039 
2040   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2041 
2042   if (G_UNLIKELY (dec->priv->ctx.input_caps == NULL && dec->priv->needs_format))
2043     goto not_negotiated;
2044 
2045   dec->priv->ctx.had_input_data = TRUE;
2046 
2047   if (!dec->priv->expecting_discont_buf &&
2048       GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
2049     gint64 samples, ts;
2050 
2051     /* track present position */
2052     ts = dec->priv->base_ts;
2053     samples = dec->priv->samples;
2054 
2055     GST_DEBUG_OBJECT (dec, "handling discont");
2056     gst_audio_decoder_flush (dec, FALSE);
2057     dec->priv->discont = TRUE;
2058 
2059     /* the buffer may claim DISCONT loudly, but if it can't tell us where we
2060      * are now, we'll stick to where we were ...
2061      * Particularly useful/needed for upstream BYTE-based input */
2062     if (dec->input_segment.rate > 0.0
2063         && !GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) {
2064       GST_DEBUG_OBJECT (dec, "... but restoring previous ts tracking");
2065       dec->priv->base_ts = ts;
2066       dec->priv->samples = samples;
2067     }
2068   }
2069   dec->priv->expecting_discont_buf = FALSE;
2070 
2071   if (dec->input_segment.rate > 0.0)
2072     ret = gst_audio_decoder_chain_forward (dec, buffer);
2073   else
2074     ret = gst_audio_decoder_chain_reverse (dec, buffer);
2075 
2076   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2077 
2078   return ret;
2079 
2080   /* ERRORS */
2081 not_negotiated:
2082   {
2083     GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2084     GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL),
2085         ("decoder not initialized"));
2086     gst_buffer_unref (buffer);
2087     return GST_FLOW_NOT_NEGOTIATED;
2088   }
2089 }
2090 
2091 /* perform upstream byte <-> time conversion (duration, seeking)
2092  * if subclass allows and if enough data for moderately decent conversion */
2093 static inline gboolean
2094 gst_audio_decoder_do_byte (GstAudioDecoder * dec)
2095 {
2096   gboolean ret;
2097 
2098   GST_OBJECT_LOCK (dec);
2099   ret = dec->priv->ctx.do_estimate_rate && dec->priv->ctx.info.bpf &&
2100       dec->priv->ctx.info.rate <= dec->priv->samples_out;
2101   GST_OBJECT_UNLOCK (dec);
2102 
2103   return ret;
2104 }
2105 
2106 /* Must be called holding the GST_AUDIO_DECODER_STREAM_LOCK */
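/* pick sensible default output caps (from downstream and template caps,
 * seeded with upstream rate/channels/channel-mask where available) so that
 * e.g. an initial GAP event can be handled before any data was decoded */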
2107 static gboolean
2108 gst_audio_decoder_negotiate_default_caps (GstAudioDecoder * dec)
2109 {
2110   GstCaps *caps, *templcaps;
2111   gint i;
2112   gint channels = 0;
2113   gint rate;
2114   guint64 channel_mask = 0;
2115   gint caps_size;
2116   GstStructure *structure;
2117   GstAudioInfo info;
2118 
2119   templcaps = gst_pad_get_pad_template_caps (dec->srcpad);
2120   caps = gst_pad_peer_query_caps (dec->srcpad, templcaps);
2121   if (caps)
2122     gst_caps_unref (templcaps);
2123   else
2124     caps = templcaps;
2125   templcaps = NULL;
2126 
2127   if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
2128     goto caps_error;
2129 
2130   GST_LOG_OBJECT (dec, "peer caps  %" GST_PTR_FORMAT, caps);
2131 
2132   /* before fixating, try to use whatever upstream provided */
2133   caps = gst_caps_make_writable (caps);
2134   caps_size = gst_caps_get_size (caps);
2135   if (dec->priv->ctx.input_caps) {
2136     GstCaps *sinkcaps = dec->priv->ctx.input_caps;
2137     GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
2138 
2139     if (gst_structure_get_int (structure, "rate", &rate)) {
2140       for (i = 0; i < caps_size; i++) {
2141         gst_structure_set (gst_caps_get_structure (caps, i), "rate",
2142             G_TYPE_INT, rate, NULL);
2143       }
2144     }
2145 
2146     if (gst_structure_get_int (structure, "channels", &channels)) {
2147       for (i = 0; i < caps_size; i++) {
2148         gst_structure_set (gst_caps_get_structure (caps, i), "channels",
2149             G_TYPE_INT, channels, NULL);
2150       }
2151     }
2152 
2153     if (gst_structure_get (structure, "channel-mask", GST_TYPE_BITMASK,
2154             &channel_mask, NULL)) {
2155       for (i = 0; i < caps_size; i++) {
2156         gst_structure_set (gst_caps_get_structure (caps, i), "channel-mask",
2157             GST_TYPE_BITMASK, channel_mask, NULL);
2158       }
2159     }
2160   }
2161 
2162   for (i = 0; i < caps_size; i++) {
2163     structure = gst_caps_get_structure (caps, i);
2164     if (gst_structure_has_field (structure, "channels"))
2165       gst_structure_fixate_field_nearest_int (structure,
2166           "channels", GST_AUDIO_DEF_CHANNELS);
2167     else
2168       gst_structure_set (structure, "channels", G_TYPE_INT,
2169           GST_AUDIO_DEF_CHANNELS, NULL);
2170     if (gst_structure_has_field (structure, "rate"))
2171       gst_structure_fixate_field_nearest_int (structure,
2172           "rate", GST_AUDIO_DEF_RATE);
2173     else
2174       gst_structure_set (structure, "rate", G_TYPE_INT, GST_AUDIO_DEF_RATE,
2175           NULL);
2176   }
2177   caps = gst_caps_fixate (caps);
2178   structure = gst_caps_get_structure (caps, 0);
2179 
2180   /* Need to add a channel-mask if channels > 2 */
2181   gst_structure_get_int (structure, "channels", &channels);
2182   if (channels > 2 && !gst_structure_has_field (structure, "channel-mask")) {
2183     channel_mask = gst_audio_channel_get_fallback_mask (channels);
2184     if (channel_mask != 0) {
2185       gst_structure_set (structure, "channel-mask",
2186           GST_TYPE_BITMASK, channel_mask, NULL);
2187     } else {
2188       GST_WARNING_OBJECT (dec, "No default channel-mask for %d channels",
2189           channels);
2190     }
2191   }
2192 
2193   if (!caps || !gst_audio_info_from_caps (&info, caps))
2194     goto caps_error;
2195 
2196   GST_OBJECT_LOCK (dec);
2197   dec->priv->ctx.info = info;
2198   dec->priv->ctx.caps = caps;
2199   GST_OBJECT_UNLOCK (dec);
2200 
2201   GST_INFO_OBJECT (dec,
2202       "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
2203 
2204   return TRUE;
2205 
2206 caps_error:
2207   {
2208     if (caps)
2209       gst_caps_unref (caps);
2210     return FALSE;
2211   }
2212 }
2213 
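/* handle an incoming GAP event: ensure output caps are negotiated (falling
 * back to default caps if nothing was decoded yet), then either let the
 * subclass conceal the gap with an empty (PLC) frame or simply forward the
 * GAP event downstream */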
2214 static gboolean
2215 gst_audio_decoder_handle_gap (GstAudioDecoder * dec, GstEvent * event)
2216 {
2217   gboolean ret;
2218   GstClockTime timestamp, duration;
2219   gboolean needs_reconfigure = FALSE;
2220 
2221   /* Ensure we have caps first */
2222   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2223   if (!GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)) {
2224     if (!gst_audio_decoder_negotiate_default_caps (dec)) {
2225       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2226       GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL),
2227           ("Decoder output not negotiated before GAP event."));
2228       gst_event_unref (event);
2229       return FALSE;
2230     }
2231     needs_reconfigure = TRUE;
2232   }
2233   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad)
2234       || needs_reconfigure;
2235   if (G_UNLIKELY (dec->priv->ctx.output_format_changed || needs_reconfigure)) {
2236     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
2237       GST_WARNING_OBJECT (dec, "Failed to negotiate with downstream");
2238       gst_pad_mark_reconfigure (dec->srcpad);
2239     }
2240   }
2241   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2242 
2243   gst_event_parse_gap (event, &timestamp, &duration);
2244 
2245   /* time progressed without data, see if we can fill the gap with
2246    * some concealment data */
2247   GST_DEBUG_OBJECT (dec,
2248       "gap event: plc %d, do_plc %d, position %" GST_TIME_FORMAT
2249       " duration %" GST_TIME_FORMAT,
2250       dec->priv->plc, dec->priv->ctx.do_plc,
2251       GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration));
2252 
2253   if (dec->priv->plc && dec->priv->ctx.do_plc && dec->input_segment.rate > 0.0) {
2254     GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2255     GstBuffer *buf;
2256 
2257     /* hand subclass empty frame with duration that needs covering */
2258     buf = gst_buffer_new ();
2259     GST_BUFFER_TIMESTAMP (buf) = timestamp;
2260     GST_BUFFER_DURATION (buf) = duration;
2261     /* best effort, not much error handling */
2262     gst_audio_decoder_handle_frame (dec, klass, buf);
2263     ret = TRUE;
2264     dec->priv->expecting_discont_buf = TRUE;
2265     gst_event_unref (event);
2266   } else {
2267     GstFlowReturn flowret;
2268 
2269     /* sub-class doesn't know how to handle empty buffers,
2270      * so just try sending GAP downstream */
2271     flowret = check_pending_reconfigure (dec);
2272     if (flowret == GST_FLOW_OK) {
2273       send_pending_events (dec);
2274       ret = gst_audio_decoder_push_event (dec, event);
2275     } else {
2276       ret = FALSE;
2277       gst_event_unref (event);
2278     }
2279   }
2280   return ret;
2281 }
2282 
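/* free a list of pending events, storing (non-EOS, non-SEGMENT) sticky
 * events on the pad so they are not lost by the flush */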
2283 static GList *
2284 _flush_events (GstPad * pad, GList * events)
2285 {
2286   GList *tmp;
2287 
2288   for (tmp = events; tmp; tmp = tmp->next) {
2289     if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
2290         GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
2291         GST_EVENT_IS_STICKY (tmp->data)) {
2292       gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
2293     }
2294     gst_event_unref (tmp->data);
2295   }
2296   g_list_free (events);
2297 
2298   return NULL;
2299 }
2300 
2301 static gboolean
2302 gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2303 {
2304   gboolean ret;
2305 
2306   switch (GST_EVENT_TYPE (event)) {
2307     case GST_EVENT_STREAM_START:
2308       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2309       /* finish any data in current segment and clear the decoder
2310        * to be ready for new stream data */
2311       gst_audio_decoder_drain (dec);
2312       gst_audio_decoder_flush (dec, FALSE);
2313 
2314       GST_DEBUG_OBJECT (dec, "received STREAM_START. Clearing taglist");
2315       /* Flush upstream tags after a STREAM_START */
2316       if (dec->priv->upstream_tags) {
2317         gst_tag_list_unref (dec->priv->upstream_tags);
2318         dec->priv->upstream_tags = NULL;
2319         dec->priv->taglist_changed = TRUE;
2320       }
2321       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2322 
2323       ret = gst_audio_decoder_push_event (dec, event);
2324       break;
2325     case GST_EVENT_SEGMENT:
2326     {
2327       GstSegment seg;
2328       GstFormat format;
2329 
2330       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2331       gst_event_copy_segment (event, &seg);
2332 
2333       format = seg.format;
2334       if (format == GST_FORMAT_TIME) {
2335         GST_DEBUG_OBJECT (dec, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
2336             &seg);
2337       } else {
2338         gint64 nstart;
2339         GST_DEBUG_OBJECT (dec, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
2340         /* handle newsegment resulting from legacy simple seeking */
2341         /* note that we need to convert this whether or not there is enough
2342          * data to handle the initial newsegment */
2343         if (dec->priv->ctx.do_estimate_rate &&
2344             gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, seg.start,
2345                 GST_FORMAT_TIME, &nstart)) {
2346           /* best attempt convert */
2347           /* as these are only estimates, stop is kept open-ended to avoid
2348            * premature cutting */
2349           GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT,
2350               GST_TIME_ARGS (nstart));
2351           seg.format = GST_FORMAT_TIME;
2352           seg.start = nstart;
2353           seg.time = nstart;
2354           seg.stop = GST_CLOCK_TIME_NONE;
2355           /* replace event */
2356           gst_event_unref (event);
2357           event = gst_event_new_segment (&seg);
2358         } else {
2359           GST_DEBUG_OBJECT (dec, "unsupported format; ignoring");
2360           GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2361           gst_event_unref (event);
2362           ret = FALSE;
2363           break;
2364         }
2365       }
2366 
2367       /* prepare for next segment */
2368       /* Use the segment start as a base timestamp
2369        * in case upstream does not come up with anything better
2370        * (e.g. upstream BYTE) */
2371       if (format != GST_FORMAT_TIME) {
2372         dec->priv->base_ts = seg.start;
2373         dec->priv->samples = 0;
2374       }
2375 
2376       /* and follow along with segment */
2377       dec->priv->in_out_segment_sync = FALSE;
2378       dec->input_segment = seg;
2379       dec->priv->pending_events =
2380           g_list_append (dec->priv->pending_events, event);
2381       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2382 
2383       ret = TRUE;
2384       break;
2385     }
2386     case GST_EVENT_GAP:
2387       ret = gst_audio_decoder_handle_gap (dec, event);
2388       break;
2389     case GST_EVENT_FLUSH_STOP:
2390       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2391       /* prepare for fresh start */
2392       gst_audio_decoder_flush (dec, TRUE);
2393 
2394       dec->priv->pending_events = _flush_events (dec->srcpad,
2395           dec->priv->pending_events);
2396       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2397 
2398       /* Forward FLUSH_STOP, it is expected to be forwarded immediately
2399        * and no buffers are queued anyway. */
2400       ret = gst_audio_decoder_push_event (dec, event);
2401       break;
2402 
2403     case GST_EVENT_SEGMENT_DONE:
2404       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2405       gst_audio_decoder_drain (dec);
2406       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2407 
2408       /* Forward SEGMENT_DONE because no buffer or serialized event might come after
2409        * SEGMENT_DONE and nothing could trigger another _finish_frame() call. */
2410       if (dec->priv->pending_events)
2411         send_pending_events (dec);
2412       ret = gst_audio_decoder_push_event (dec, event);
2413       break;
2414 
2415     case GST_EVENT_EOS:
2416       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2417       gst_audio_decoder_drain (dec);
2418       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2419 
2420       if (dec->priv->ctx.had_input_data && !dec->priv->ctx.had_output_data) {
2421         GST_ELEMENT_ERROR (dec, STREAM, DECODE,
2422             ("No valid frames decoded before end of stream"),
2423             ("no valid frames found"));
2424       }
2425 
2426       /* Forward EOS because no buffer or serialized event will come after
2427        * EOS and nothing could trigger another _finish_frame() call. */
2428       if (dec->priv->pending_events)
2429         send_pending_events (dec);
2430       ret = gst_audio_decoder_push_event (dec, event);
2431       break;
2432 
2433     case GST_EVENT_CAPS:
2434     {
2435       GstCaps *caps;
2436 
2437       gst_event_parse_caps (event, &caps);
2438       ret = gst_audio_decoder_sink_setcaps (dec, caps);
2439       gst_event_unref (event);
2440       break;
2441     }
2442     case GST_EVENT_TAG:
2443     {
2444       GstTagList *tags;
2445 
2446       gst_event_parse_tag (event, &tags);
2447 
2448       if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
2449         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2450         if (dec->priv->upstream_tags != tags) {
2451           if (dec->priv->upstream_tags)
2452             gst_tag_list_unref (dec->priv->upstream_tags);
2453           dec->priv->upstream_tags = gst_tag_list_ref (tags);
2454           GST_INFO_OBJECT (dec, "upstream stream tags: %" GST_PTR_FORMAT, tags);
2455         }
2456         gst_event_unref (event);
2457         event = gst_audio_decoder_create_merged_tags_event (dec);
2458         dec->priv->taglist_changed = FALSE;
2459         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2460 
2461         /* No tags, go out of here instead of fall through */
2462         if (!event) {
2463           ret = TRUE;
2464           break;
2465         }
2466       }
2467 
2468       /* fall through */
2469     }
2470     default:
2471       if (!GST_EVENT_IS_SERIALIZED (event)) {
2472         ret =
2473             gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2474       } else {
2475         GST_DEBUG_OBJECT (dec, "Enqueuing event %d, %s", GST_EVENT_TYPE (event),
2476             GST_EVENT_TYPE_NAME (event));
2477         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2478         dec->priv->pending_events =
2479             g_list_append (dec->priv->pending_events, event);
2480         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2481         ret = TRUE;
2482       }
2483       break;
2484   }
2485   return ret;
2486 }
2487 
2488 static gboolean
2489 gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
2490     GstEvent * event)
2491 {
2492   GstAudioDecoder *dec;
2493   GstAudioDecoderClass *klass;
2494   gboolean ret;
2495 
2496   dec = GST_AUDIO_DECODER (parent);
2497   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2498 
2499   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2500       GST_EVENT_TYPE_NAME (event));
2501 
2502   if (klass->sink_event)
2503     ret = klass->sink_event (dec, event);
2504   else {
2505     gst_event_unref (event);
2506     ret = FALSE;
2507   }
2508   return ret;
2509 }
2510 
2511 static gboolean
2512 gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event)
2513 {
2514   GstSeekFlags flags;
2515   GstSeekType start_type, end_type;
2516   GstFormat format;
2517   gdouble rate;
2518   gint64 start, start_time, end_time;
2519   GstSegment seek_segment;
2520   guint32 seqnum;
2521 
2522   gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
2523       &start_time, &end_type, &end_time);
2524 
2525   /* we'll handle plain open-ended flushing seeks with the simple approach */
2526   if (rate != 1.0) {
2527     GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
2528     return FALSE;
2529   }
2530 
2531   if (start_type != GST_SEEK_TYPE_SET) {
2532     GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
2533     return FALSE;
2534   }
2535 
2536   if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
2537       (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
2538     GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
2539     return FALSE;
2540   }
2541 
2542   if (!(flags & GST_SEEK_FLAG_FLUSH)) {
2543     GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
2544     return FALSE;
2545   }
2546 
2547   memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
2548   gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
2549       start_time, end_type, end_time, NULL);
2550   start_time = seek_segment.position;
2551 
2552   if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
2553           GST_FORMAT_BYTES, &start)) {
2554     GST_DEBUG_OBJECT (dec, "conversion failed");
2555     return FALSE;
2556   }
2557 
2558   seqnum = gst_event_get_seqnum (event);
2559   event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
2560       GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
2561   gst_event_set_seqnum (event, seqnum);
2562 
2563   GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
2564       G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
2565 
2566   return gst_pad_push_event (dec->sinkpad, event);
2567 }
2568 
2569 static gboolean
2570 gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2571 {
2572   gboolean res;
2573 
2574   switch (GST_EVENT_TYPE (event)) {
2575     case GST_EVENT_SEEK:
2576     {
2577       GstFormat format;
2578       gdouble rate;
2579       GstSeekFlags flags;
2580       GstSeekType start_type, stop_type;
2581       gint64 start, stop;
2582       gint64 tstart, tstop;
2583       guint32 seqnum;
2584 
2585       gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
2586           &stop_type, &stop);
2587       seqnum = gst_event_get_seqnum (event);
2588 
2589       /* upstream gets a chance first */
2590       if ((res = gst_pad_push_event (dec->sinkpad, event)))
2591         break;
2592 
2593       /* if upstream fails for a time seek, maybe we can help if allowed */
2594       if (format == GST_FORMAT_TIME) {
2595         if (gst_audio_decoder_do_byte (dec))
2596           res = gst_audio_decoder_do_seek (dec, event);
2597         break;
2598       }
2599 
2600       /* ... though a non-time seek can be aided as well */
2601       /* First bring the requested format to time */
2602       if (!(res =
2603               gst_pad_query_convert (dec->srcpad, format, start,
2604                   GST_FORMAT_TIME, &tstart)))
2605         goto convert_error;
2606       if (!(res =
2607               gst_pad_query_convert (dec->srcpad, format, stop, GST_FORMAT_TIME,
2608                   &tstop)))
2609         goto convert_error;
2610 
2611       /* then seek with time on the peer */
2612       event = gst_event_new_seek (rate, GST_FORMAT_TIME,
2613           flags, start_type, tstart, stop_type, tstop);
2614       gst_event_set_seqnum (event, seqnum);
2615 
2616       res = gst_pad_push_event (dec->sinkpad, event);
2617       break;
2618     }
2619     default:
2620       res = gst_pad_event_default (dec->srcpad, GST_OBJECT_CAST (dec), event);
2621       break;
2622   }
2623 done:
2624   return res;
2625 
2626   /* ERRORS */
2627 convert_error:
2628   {
2629     GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek");
2630     goto done;
2631   }
2632 }
2633 
2634 static gboolean
2635 gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2636 {
2637   GstAudioDecoder *dec;
2638   GstAudioDecoderClass *klass;
2639   gboolean ret;
2640 
2641   dec = GST_AUDIO_DECODER (parent);
2642   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2643 
2644   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2645       GST_EVENT_TYPE_NAME (event));
2646 
2647   if (klass->src_event)
2648     ret = klass->src_event (dec, event);
2649   else {
2650     gst_event_unref (event);
2651     ret = FALSE;
2652   }
2653 
2654   return ret;
2655 }
2656 
2657 static gboolean
2658 gst_audio_decoder_decide_allocation_default (GstAudioDecoder * dec,
2659     GstQuery * query)
2660 {
2661   GstAllocator *allocator = NULL;
2662   GstAllocationParams params;
2663   gboolean update_allocator;
2664 
2665   /* we got configuration from our peer or the decide_allocation method,
2666    * parse them */
2667   if (gst_query_get_n_allocation_params (query) > 0) {
2668     /* try the allocator */
2669     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2670     update_allocator = TRUE;
2671   } else {
2672     allocator = NULL;
2673     gst_allocation_params_init (&params);
2674     update_allocator = FALSE;
2675   }
2676 
2677   if (update_allocator)
2678     gst_query_set_nth_allocation_param (query, 0, allocator, &params);
2679   else
2680     gst_query_add_allocation_param (query, allocator, &params);
2681   if (allocator)
2682     gst_object_unref (allocator);
2683 
2684   return TRUE;
2685 }
2686 
2687 static gboolean
2688 gst_audio_decoder_propose_allocation_default (GstAudioDecoder * dec,
2689     GstQuery * query)
2690 {
2691   return TRUE;
2692 }
2693 
2694 /**
2695  * gst_audio_decoder_proxy_getcaps:
2696  * @decoder: a #GstAudioDecoder
2697  * @caps: (allow-none): initial caps
2698  * @filter: (allow-none): filter caps
2699  *
2700  * Returns caps that express @caps (or sink template caps if @caps == NULL)
2701  * restricted to rate/channels/... combinations supported by downstream
2702  * elements.
2703  *
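 * For example, a subclass ::getcaps implementation could simply proxy to
 * this helper; a minimal sketch (my_dec_getcaps being a hypothetical
 * subclass function):
 *
 * |[<!-- language="C" -->
 * static GstCaps *
 * my_dec_getcaps (GstAudioDecoder * dec, GstCaps * filter)
 * {
 *   return gst_audio_decoder_proxy_getcaps (dec, NULL, filter);
 * }
 * ]|
 *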
2704  * Returns: (transfer full): a #GstCaps owned by caller
2705  *
2706  * Since: 1.6
2707  */
2708 GstCaps *
2709 gst_audio_decoder_proxy_getcaps (GstAudioDecoder * decoder, GstCaps * caps,
2710     GstCaps * filter)
2711 {
2712   return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2713       GST_AUDIO_DECODER_SINK_PAD (decoder),
2714       GST_AUDIO_DECODER_SRC_PAD (decoder), caps, filter);
2715 }
2716 
2717 static GstCaps *
2718 gst_audio_decoder_sink_getcaps (GstAudioDecoder * decoder, GstCaps * filter)
2719 {
2720   GstAudioDecoderClass *klass;
2721   GstCaps *caps;
2722 
2723   klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
2724 
2725   if (klass->getcaps)
2726     caps = klass->getcaps (decoder, filter);
2727   else
2728     caps = gst_audio_decoder_proxy_getcaps (decoder, NULL, filter);
2729 
2730   GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2731 
2732   return caps;
2733 }
2734 
2735 static gboolean
2736 gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, GstQuery * query)
2737 {
2738   GstPad *pad = GST_AUDIO_DECODER_SINK_PAD (dec);
2739   gboolean res = FALSE;
2740 
2741   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2742 
2743   switch (GST_QUERY_TYPE (query)) {
2744     case GST_QUERY_FORMATS:
2745     {
2746       gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
2747       res = TRUE;
2748       break;
2749     }
2750     case GST_QUERY_CONVERT:
2751     {
2752       GstFormat src_fmt, dest_fmt;
2753       gint64 src_val, dest_val;
2754 
2755       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2756       GST_OBJECT_LOCK (dec);
2757       res = __gst_audio_encoded_audio_convert (&dec->priv->ctx.info,
2758           dec->priv->bytes_in, dec->priv->samples_out,
2759           src_fmt, src_val, &dest_fmt, &dest_val);
2760       GST_OBJECT_UNLOCK (dec);
2761       if (!res)
2762         goto error;
2763       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2764       break;
2765     }
2766     case GST_QUERY_ALLOCATION:
2767     {
2768       GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2769 
2770       if (klass->propose_allocation)
2771         res = klass->propose_allocation (dec, query);
2772       break;
2773     }
2774     case GST_QUERY_CAPS:{
2775       GstCaps *filter, *caps;
2776 
2777       gst_query_parse_caps (query, &filter);
2778       caps = gst_audio_decoder_sink_getcaps (dec, filter);
2779       gst_query_set_caps_result (query, caps);
2780       gst_caps_unref (caps);
2781       res = TRUE;
2782       break;
2783     }
2784     case GST_QUERY_ACCEPT_CAPS:{
2785       if (dec->priv->use_default_pad_acceptcaps) {
2786         res =
2787             gst_pad_query_default (GST_AUDIO_DECODER_SINK_PAD (dec),
2788             GST_OBJECT_CAST (dec), query);
2789       } else {
2790         GstCaps *caps;
2791         GstCaps *allowed_caps;
2792         GstCaps *template_caps;
2793         gboolean accept;
2794 
2795         gst_query_parse_accept_caps (query, &caps);
2796 
2797         template_caps = gst_pad_get_pad_template_caps (pad);
2798         accept = gst_caps_is_subset (caps, template_caps);
2799         gst_caps_unref (template_caps);
2800 
2801         if (accept) {
2802           allowed_caps = gst_pad_query_caps (GST_AUDIO_DECODER_SINK_PAD (dec),
2803               caps);
2804 
2805           accept = gst_caps_can_intersect (caps, allowed_caps);
2806 
2807           gst_caps_unref (allowed_caps);
2808         }
2809 
2810         gst_query_set_accept_caps_result (query, accept);
2811         res = TRUE;
2812       }
2813       break;
2814     }
2815     case GST_QUERY_SEEKING:
2816     {
2817       GstFormat format;
2818 
2819       /* non-TIME segments are discarded, so we won't seek that way either */
2820       gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
2821       if (format != GST_FORMAT_TIME) {
2822         GST_DEBUG_OBJECT (dec, "discarding non-TIME SEEKING query");
2823         res = FALSE;
2824         break;
2825       }
2826       /* fall-through */
2827     }
2828     default:
2829       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
2830       break;
2831   }
2832 
2833 error:
2834   return res;
2835 }
2836 
2837 static gboolean
2838 gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
2839     GstQuery * query)
2840 {
2841   GstAudioDecoderClass *dec_class;
2842   GstAudioDecoder *dec;
2843   gboolean ret = FALSE;
2844 
2845   dec = GST_AUDIO_DECODER (parent);
2846   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
2847 
2848   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
2849 
2850   if (dec_class->sink_query)
2851     ret = dec_class->sink_query (dec, query);
2852 
2853   return ret;
2854 }
2855 
2856 /* FIXME ? are any of these queries (other than latency) a decoder's business ??
2857  * also, the conversion stuff might seem to make sense, but it does not take
2858  * segment handling etc. into account at all
2859  * Supposedly that's backward compatibility ... */
2860 static gboolean
2861 gst_audio_decoder_src_query_default (GstAudioDecoder * dec, GstQuery * query)
2862 {
2863   GstPad *pad = GST_AUDIO_DECODER_SRC_PAD (dec);
2864   gboolean res = FALSE;
2865 
2866   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2867 
2868   switch (GST_QUERY_TYPE (query)) {
2869     case GST_QUERY_DURATION:
2870     {
2871       GstFormat format;
2872 
2873       /* upstream in any case */
2874       if ((res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query)))
2875         break;
2876 
2877       gst_query_parse_duration (query, &format, NULL);
2878       /* try answering TIME by converting from BYTE if subclass allows  */
2879       if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) {
2880         gint64 value;
2881 
2882         if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2883                 &value)) {
2884           GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2885           if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value,
2886                   GST_FORMAT_TIME, &value)) {
2887             gst_query_set_duration (query, GST_FORMAT_TIME, value);
2888             res = TRUE;
2889           }
2890         }
2891       }
2892       break;
2893     }
2894     case GST_QUERY_POSITION:
2895     {
2896       GstFormat format;
2897       gint64 time, value;
2898 
2899       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
2900         GST_LOG_OBJECT (dec, "returning peer response");
2901         break;
2902       }
2903 
2904       /* Refuse BYTES format queries. If it made sense to
2905        * answer them, upstream would have already */
2906       gst_query_parse_position (query, &format, NULL);
2907 
2908       if (format == GST_FORMAT_BYTES) {
2909         GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
2910         break;
2911       }
2912 
2913       /* we start from the last seen time */
2914       time = dec->output_segment.position;
2915       /* correct for the segment values */
2916       time =
2917           gst_segment_to_stream_time (&dec->output_segment, GST_FORMAT_TIME,
2918           time);
2919 
2920       GST_LOG_OBJECT (dec,
2921           "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
2922 
2923       /* and convert to the final format */
2924       if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
2925                   format, &value)))
2926         break;
2927 
2928       gst_query_set_position (query, format, value);
2929 
2930       GST_LOG_OBJECT (dec,
2931           "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
2932           format);
2933       break;
2934     }
2935     case GST_QUERY_FORMATS:
2936     {
2937       gst_query_set_formats (query, 3,
2938           GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
2939       res = TRUE;
2940       break;
2941     }
2942     case GST_QUERY_CONVERT:
2943     {
2944       GstFormat src_fmt, dest_fmt;
2945       gint64 src_val, dest_val;
2946 
2947       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2948       GST_OBJECT_LOCK (dec);
2949       res = gst_audio_info_convert (&dec->priv->ctx.info,
2950           src_fmt, src_val, dest_fmt, &dest_val);
2951       GST_OBJECT_UNLOCK (dec);
2952       if (!res)
2953         break;
2954       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2955       break;
2956     }
2957     case GST_QUERY_LATENCY:
2958     {
2959       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
2960         gboolean live;
2961         GstClockTime min_latency, max_latency;
2962 
2963         gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2964         GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %"
2965             GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2966             GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2967 
2968         GST_OBJECT_LOCK (dec);
2969         /* add our latency */
2970         min_latency += dec->priv->ctx.min_latency;
2971         if (max_latency == -1 || dec->priv->ctx.max_latency == -1)
2972           max_latency = -1;
2973         else
2974           max_latency += dec->priv->ctx.max_latency;
2975         GST_OBJECT_UNLOCK (dec);
2976 
2977         gst_query_set_latency (query, live, min_latency, max_latency);
2978       }
2979       break;
2980     }
2981     default:
2982       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
2983       break;
2984   }
2985 
2986   return res;
2987 }
2988 
2989 static gboolean
2990 gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2991 {
2992   GstAudioDecoder *dec;
2993   GstAudioDecoderClass *dec_class;
2994   gboolean ret = FALSE;
2995 
2996   dec = GST_AUDIO_DECODER (parent);
2997   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
2998 
2999   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
3000 
3001   if (dec_class->src_query)
3002     ret = dec_class->src_query (dec, query);
3003 
3004   return ret;
3005 }
3006 
3007 static gboolean
3008 gst_audio_decoder_stop (GstAudioDecoder * dec)
3009 {
3010   GstAudioDecoderClass *klass;
3011   gboolean ret = TRUE;
3012 
3013   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop");
3014 
3015   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3016 
3017   if (klass->stop) {
3018     ret = klass->stop (dec);
3019   }
3020 
3021   /* clean up */
3022   gst_audio_decoder_reset (dec, TRUE);
3023 
3024   if (ret)
3025     dec->priv->active = FALSE;
3026 
3027   return ret;
3028 }
3029 
3030 static gboolean
3031 gst_audio_decoder_start (GstAudioDecoder * dec)
3032 {
3033   GstAudioDecoderClass *klass;
3034   gboolean ret = TRUE;
3035 
3036   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start");
3037 
3038   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3039 
3040   /* arrange clean state */
3041   gst_audio_decoder_reset (dec, TRUE);
3042 
3043   if (klass->start) {
3044     ret = klass->start (dec);
3045   }
3046 
3047   if (ret)
3048     dec->priv->active = TRUE;
3049 
3050   return ret;
3051 }
3052 
3053 static void
3054 gst_audio_decoder_get_property (GObject * object, guint prop_id,
3055     GValue * value, GParamSpec * pspec)
3056 {
3057   GstAudioDecoder *dec;
3058 
3059   dec = GST_AUDIO_DECODER (object);
3060 
3061   switch (prop_id) {
3062     case PROP_LATENCY:
3063       g_value_set_int64 (value, dec->priv->latency);
3064       break;
3065     case PROP_TOLERANCE:
3066       g_value_set_int64 (value, dec->priv->tolerance);
3067       break;
3068     case PROP_PLC:
3069       g_value_set_boolean (value, dec->priv->plc);
3070       break;
3071     default:
3072       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3073       break;
3074   }
3075 }
3076 
3077 static void
3078 gst_audio_decoder_set_property (GObject * object, guint prop_id,
3079     const GValue * value, GParamSpec * pspec)
3080 {
3081   GstAudioDecoder *dec;
3082 
3083   dec = GST_AUDIO_DECODER (object);
3084 
3085   switch (prop_id) {
3086     case PROP_LATENCY:
3087       dec->priv->latency = g_value_get_int64 (value);
3088       break;
3089     case PROP_TOLERANCE:
3090       dec->priv->tolerance = g_value_get_int64 (value);
3091       break;
3092     case PROP_PLC:
3093       dec->priv->plc = g_value_get_boolean (value);
3094       break;
3095     default:
3096       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3097       break;
3098   }
3099 }
3100 
3101 static GstStateChangeReturn
3102 gst_audio_decoder_change_state (GstElement * element, GstStateChange transition)
3103 {
3104   GstAudioDecoder *codec;
3105   GstAudioDecoderClass *klass;
3106   GstStateChangeReturn ret;
3107 
3108   codec = GST_AUDIO_DECODER (element);
3109   klass = GST_AUDIO_DECODER_GET_CLASS (codec);
3110 
3111   switch (transition) {
3112     case GST_STATE_CHANGE_NULL_TO_READY:
3113       if (klass->open) {
3114         if (!klass->open (codec))
3115           goto open_failed;
3116       }
3117       break;
3118     case GST_STATE_CHANGE_READY_TO_PAUSED:
3119       if (!gst_audio_decoder_start (codec)) {
3120         goto start_failed;
3121       }
3122       break;
3123     case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
3124       break;
3125     default:
3126       break;
3127   }
3128 
3129   ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
3130 
3131   switch (transition) {
3132     case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
3133       break;
3134     case GST_STATE_CHANGE_PAUSED_TO_READY:
3135       if (!gst_audio_decoder_stop (codec)) {
3136         goto stop_failed;
3137       }
3138       break;
3139     case GST_STATE_CHANGE_READY_TO_NULL:
3140       if (klass->close) {
3141         if (!klass->close (codec))
3142           goto close_failed;
3143       }
3144       break;
3145     default:
3146       break;
3147   }
3148 
3149   return ret;
3150 
3151 start_failed:
3152   {
3153     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec"));
3154     return GST_STATE_CHANGE_FAILURE;
3155   }
3156 stop_failed:
3157   {
3158     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec"));
3159     return GST_STATE_CHANGE_FAILURE;
3160   }
3161 open_failed:
3162   {
3163     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to open codec"));
3164     return GST_STATE_CHANGE_FAILURE;
3165   }
3166 close_failed:
3167   {
3168     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to close codec"));
3169     return GST_STATE_CHANGE_FAILURE;
3170   }
3171 }
3172 
3173 GstFlowReturn
3174 _gst_audio_decoder_error (GstAudioDecoder * dec, gint weight,
3175     GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
3176     const gchar * function, gint line)
3177 {
3178   if (txt)
3179     GST_WARNING_OBJECT (dec, "error: %s", txt);
3180   if (dbg)
3181     GST_WARNING_OBJECT (dec, "error: %s", dbg);
3182   dec->priv->error_count += weight;
3183   dec->priv->discont = TRUE;
3184   if (dec->priv->ctx.max_errors >= 0
3185       && dec->priv->ctx.max_errors < dec->priv->error_count) {
3186     gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, domain,
3187         code, txt, dbg, file, function, line);
3188     return GST_FLOW_ERROR;
3189   } else {
3190     g_free (txt);
3191     g_free (dbg);
3192     return GST_FLOW_OK;
3193   }
3194 }
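
/* Subclasses normally do not call _gst_audio_decoder_error() directly but use
 * the GST_AUDIO_DECODER_ERROR macro, which fills in file/function/line and
 * stores the resulting GstFlowReturn in the supplied variable.  A minimal
 * sketch of how a hypothetical subclass might report a recoverable decode
 * error from its handle_frame implementation (my_dec_handle_frame and
 * decode_frame are assumed names):
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
 *   {
 *     GstFlowReturn ret = GST_FLOW_OK;
 *
 *     if (buffer && !decode_frame (dec, buffer)) {
 *       GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
 *           ("failed to decode frame"), ret);
 *       return ret;
 *     }
 *     return ret;
 *   }
 */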
3195 
3196 /**
3197  * gst_audio_decoder_get_audio_info:
3198  * @dec: a #GstAudioDecoder
3199  *
3200  * Returns: a #GstAudioInfo describing the input audio format
3201  */
3202 GstAudioInfo *
3203 gst_audio_decoder_get_audio_info (GstAudioDecoder * dec)
3204 {
3205   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL);
3206 
3207   return &dec->priv->ctx.info;
3208 }
3209 
3210 /**
3211  * gst_audio_decoder_set_plc_aware:
3212  * @dec: a #GstAudioDecoder
3213  * @plc: new plc state
3214  *
3215  * Indicates whether or not subclass handles packet loss concealment (plc).
3216  */
3217 void
3218 gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc)
3219 {
3220   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3221 
3222   dec->priv->ctx.do_plc = plc;
3223 }
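
/* A subclass that can conceal lost packets would typically advertise this
 * from its init or start function; whether concealment is actually requested
 * also depends on the user-visible "plc" property.  Illustrative sketch:
 *
 *   gst_audio_decoder_set_plc_aware (dec, TRUE);
 */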
3224 
3225 /**
3226  * gst_audio_decoder_get_plc_aware:
3227  * @dec: a #GstAudioDecoder
3228  *
3229  * Returns: currently configured plc handling
3230  */
3231 gint
3232 gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec)
3233 {
3234   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3235 
3236   return dec->priv->ctx.do_plc;
3237 }
3238 
3239 /**
3240  * gst_audio_decoder_set_estimate_rate:
3241  * @dec: a #GstAudioDecoder
3242  * @enabled: whether to enable byte to time conversion
3243  *
3244  * Allows the base class to perform estimated byte-to-time conversion.
3245  */
3246 void
3247 gst_audio_decoder_set_estimate_rate (GstAudioDecoder * dec, gboolean enabled)
3248 {
3249   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3250 
3251   dec->priv->ctx.do_estimate_rate = enabled;
3252 }
3253 
3254 /**
3255  * gst_audio_decoder_get_estimate_rate:
3256  * @dec: a #GstAudioDecoder
3257  *
3258  * Returns: currently configured byte to time conversion setting
3259  */
3260 gint
3261 gst_audio_decoder_get_estimate_rate (GstAudioDecoder * dec)
3262 {
3263   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3264 
3265   return dec->priv->ctx.do_estimate_rate;
3266 }
3267 
3268 /**
3269  * gst_audio_decoder_get_delay:
3270  * @dec: a #GstAudioDecoder
3271  *
3272  * Returns: currently configured decoder delay
3273  */
3274 gint
3275 gst_audio_decoder_get_delay (GstAudioDecoder * dec)
3276 {
3277   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3278 
3279   return dec->priv->ctx.delay;
3280 }
3281 
3282 /**
3283  * gst_audio_decoder_set_max_errors:
3284  * @dec: a #GstAudioDecoder
3285  * @num: max tolerated errors
3286  *
3287  * Sets the number of tolerated decoder errors.  A tolerated error is only
3288  * warned about; exceeding the tolerated count leads to a fatal error.
3289  * You can set -1 so that fatal errors are never returned.
3290  * The default is GST_AUDIO_DECODER_MAX_ERRORS.
3291  */
3292 void
3293 gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num)
3294 {
3295   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3296 
3297   dec->priv->ctx.max_errors = num;
3298 }
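
/* For example, a hypothetical subclass that prefers to only ever warn about
 * broken input could lift the limit in its start vfunc (my_dec_start is an
 * assumed name):
 *
 *   static gboolean
 *   my_dec_start (GstAudioDecoder * dec)
 *   {
 *     gst_audio_decoder_set_max_errors (dec, -1);
 *     return TRUE;
 *   }
 */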
3299 
3300 /**
3301  * gst_audio_decoder_get_max_errors:
3302  * @dec: a #GstAudioDecoder
3303  *
3304  * Returns: currently configured decoder tolerated error count.
3305  */
3306 gint
3307 gst_audio_decoder_get_max_errors (GstAudioDecoder * dec)
3308 {
3309   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3310 
3311   return dec->priv->ctx.max_errors;
3312 }
3313 
3314 /**
3315  * gst_audio_decoder_set_latency:
3316  * @dec: a #GstAudioDecoder
3317  * @min: minimum latency
3318  * @max: maximum latency
3319  *
3320  * Sets decoder latency.
3321  */
3322 void
3323 gst_audio_decoder_set_latency (GstAudioDecoder * dec,
3324     GstClockTime min, GstClockTime max)
3325 {
3326   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3327   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
3328   g_return_if_fail (min <= max);
3329 
3330   GST_OBJECT_LOCK (dec);
3331   dec->priv->ctx.min_latency = min;
3332   dec->priv->ctx.max_latency = max;
3333   GST_OBJECT_UNLOCK (dec);
3334 
3335   /* post latency message on the bus */
3336   gst_element_post_message (GST_ELEMENT (dec),
3337       gst_message_new_latency (GST_OBJECT (dec)));
3338 }
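
/* A subclass typically reports latency once the input format, and thus the
 * codec's algorithmic delay, is known, e.g. from its set_format vfunc.
 * Illustrative sketch (frame_samples and rate are assumed to come from the
 * negotiated format):
 *
 *   GstClockTime frame_dur =
 *       gst_util_uint64_scale_int (GST_SECOND, frame_samples, rate);
 *
 *   gst_audio_decoder_set_latency (dec, frame_dur, frame_dur);
 */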
3339 
3340 /**
3341  * gst_audio_decoder_get_latency:
3342  * @dec: a #GstAudioDecoder
3343  * @min: (out) (allow-none): a pointer to storage to hold minimum latency
3344  * @max: (out) (allow-none): a pointer to storage to hold maximum latency
3345  *
3346  * Sets the variables pointed to by @min and @max to the currently configured
3347  * latency.
3348  */
3349 void
3350 gst_audio_decoder_get_latency (GstAudioDecoder * dec,
3351     GstClockTime * min, GstClockTime * max)
3352 {
3353   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3354 
3355   GST_OBJECT_LOCK (dec);
3356   if (min)
3357     *min = dec->priv->ctx.min_latency;
3358   if (max)
3359     *max = dec->priv->ctx.max_latency;
3360   GST_OBJECT_UNLOCK (dec);
3361 }
3362 
3363 /**
3364  * gst_audio_decoder_get_parse_state:
3365  * @dec: a #GstAudioDecoder
3366  * @sync: (out) (optional): a pointer to a variable to hold the current sync state
3367  * @eos: (out) (optional): a pointer to a variable to hold the current eos state
3368  *
3369  * Returns the current parsing (sync and eos) state.
3370  */
3371 void
3372 gst_audio_decoder_get_parse_state (GstAudioDecoder * dec,
3373     gboolean * sync, gboolean * eos)
3374 {
3375   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3376 
3377   if (sync)
3378     *sync = dec->priv->ctx.sync;
3379   if (eos)
3380     *eos = dec->priv->ctx.eos;
3381 }
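
/* A subclass' parse vfunc might use the eos state to decide whether a
 * trailing, incomplete chunk in the adapter should still be flushed as a
 * final frame.  Sketch:
 *
 *   gboolean eos;
 *
 *   gst_audio_decoder_get_parse_state (dec, NULL, &eos);
 *   if (eos) {
 *     (flush whatever is still pending)
 *   }
 */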
3382 
3383 /**
3384  * gst_audio_decoder_set_allocation_caps:
3385  * @dec: a #GstAudioDecoder
3386  * @allocation_caps: (allow-none): a #GstCaps or %NULL
3387  *
3388  * Sets the caps to use in the allocation query, which may differ from the
3389  * caps set on the pad. Use this function before calling
3390  * gst_audio_decoder_negotiate(). Setting it to %NULL makes the allocation
3391  * query use the caps from the pad.
3392  *
3393  * Since: 1.10
3394  */
3395 void
3396 gst_audio_decoder_set_allocation_caps (GstAudioDecoder * dec,
3397     GstCaps * allocation_caps)
3398 {
3399   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3400 
3401   gst_caps_replace (&dec->priv->ctx.allocation_caps, allocation_caps);
3402 }
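
/* Illustrative use, assuming alloc_caps is a subclass-provided caps set that
 * should be used for the ALLOCATION query instead of the srcpad caps:
 *
 *   gst_audio_decoder_set_allocation_caps (dec, alloc_caps);
 *   gst_audio_decoder_negotiate (dec);
 */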
3403 
3404 /**
3405  * gst_audio_decoder_set_plc:
3406  * @dec: a #GstAudioDecoder
3407  * @enabled: new state
3408  *
3409  * Enable or disable decoder packet loss concealment, provided subclass
3410  * and codec are capable and allow handling plc.
3411  *
3412  * MT safe.
3413  */
3414 void
3415 gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled)
3416 {
3417   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3418 
3419   GST_LOG_OBJECT (dec, "enabled: %d", enabled);
3420 
3421   GST_OBJECT_LOCK (dec);
3422   dec->priv->plc = enabled;
3423   GST_OBJECT_UNLOCK (dec);
3424 }
3425 
3426 /**
3427  * gst_audio_decoder_get_plc:
3428  * @dec: a #GstAudioDecoder
3429  *
3430  * Queries decoder packet loss concealment handling.
3431  *
3432  * Returns: TRUE if packet loss concealment is enabled.
3433  *
3434  * MT safe.
3435  */
3436 gboolean
3437 gst_audio_decoder_get_plc (GstAudioDecoder * dec)
3438 {
3439   gboolean result;
3440 
3441   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3442 
3443   GST_OBJECT_LOCK (dec);
3444   result = dec->priv->plc;
3445   GST_OBJECT_UNLOCK (dec);
3446 
3447   return result;
3448 }
3449 
3450 /**
3451  * gst_audio_decoder_set_min_latency:
3452  * @dec: a #GstAudioDecoder
3453  * @num: new minimum latency
3454  *
3455  * Sets decoder minimum aggregation latency.
3456  *
3457  * MT safe.
3458  */
3459 void
3460 gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, GstClockTime num)
3461 {
3462   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3463 
3464   GST_OBJECT_LOCK (dec);
3465   dec->priv->latency = num;
3466   GST_OBJECT_UNLOCK (dec);
3467 }
3468 
3469 /**
3470  * gst_audio_decoder_get_min_latency:
3471  * @dec: a #GstAudioDecoder
3472  *
3473  * Queries the decoder's minimum aggregation latency.
3474  *
3475  * Returns: aggregation latency.
3476  *
3477  * MT safe.
3478  */
3479 GstClockTime
3480 gst_audio_decoder_get_min_latency (GstAudioDecoder * dec)
3481 {
3482   GstClockTime result;
3483 
3484   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3485 
3486   GST_OBJECT_LOCK (dec);
3487   result = dec->priv->latency;
3488   GST_OBJECT_UNLOCK (dec);
3489 
3490   return result;
3491 }
3492 
3493 /**
3494  * gst_audio_decoder_set_tolerance:
3495  * @dec: a #GstAudioDecoder
3496  * @tolerance: new tolerance
3497  *
3498  * Configures decoder audio jitter tolerance threshold.
3499  *
3500  * MT safe.
3501  */
3502 void
3503 gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, GstClockTime tolerance)
3504 {
3505   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3506 
3507   GST_OBJECT_LOCK (dec);
3508   dec->priv->tolerance = tolerance;
3509   GST_OBJECT_UNLOCK (dec);
3510 }
3511 
3512 /**
3513  * gst_audio_decoder_get_tolerance:
3514  * @dec: a #GstAudioDecoder
3515  *
3516  * Queries current audio jitter tolerance threshold.
3517  *
3518  * Returns: decoder audio jitter tolerance threshold.
3519  *
3520  * MT safe.
3521  */
3522 GstClockTime
3523 gst_audio_decoder_get_tolerance (GstAudioDecoder * dec)
3524 {
3525   GstClockTime result;
3526 
3527   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3528 
3529   GST_OBJECT_LOCK (dec);
3530   result = dec->priv->tolerance;
3531   GST_OBJECT_UNLOCK (dec);
3532 
3533   return result;
3534 }
3535 
3536 /**
3537  * gst_audio_decoder_set_drainable:
3538  * @dec: a #GstAudioDecoder
3539  * @enabled: new state
3540  *
3541  * Configures decoder drain handling.  If drainable, the subclass might
3542  * be handed a NULL buffer to have it return any leftover decoded data.
3543  * Otherwise, it is not considered capable of draining and will only ever be
3544  * passed real data.
3545  *
3546  * MT safe.
3547  */
3548 void
3549 gst_audio_decoder_set_drainable (GstAudioDecoder * dec, gboolean enabled)
3550 {
3551   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3552 
3553   GST_OBJECT_LOCK (dec);
3554   dec->priv->drainable = enabled;
3555   GST_OBJECT_UNLOCK (dec);
3556 }
3557 
3558 /**
3559  * gst_audio_decoder_get_drainable:
3560  * @dec: a #GstAudioDecoder
3561  *
3562  * Queries decoder drain handling.
3563  *
3564  * Returns: TRUE if drainable handling is enabled.
3565  *
3566  * MT safe.
3567  */
3568 gboolean
3569 gst_audio_decoder_get_drainable (GstAudioDecoder * dec)
3570 {
3571   gboolean result;
3572 
3573   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3574 
3575   GST_OBJECT_LOCK (dec);
3576   result = dec->priv->drainable;
3577   GST_OBJECT_UNLOCK (dec);
3578 
3579   return result;
3580 }
3581 
3582 /**
3583  * gst_audio_decoder_set_needs_format:
3584  * @dec: a #GstAudioDecoder
3585  * @enabled: new state
3586  *
3587  * Configures decoder format needs.  If enabled, the subclass needs to be
3588  * negotiated with format caps before it can process any data.  It will then
3589  * never be handed any data before it has been configured.
3590  * Otherwise, it might be handed data without having been configured and
3591  * is then expected to be able to cope with that, either by default
3592  * or based on the input data.
3593  *
3594  * MT safe.
3595  */
3596 void
3597 gst_audio_decoder_set_needs_format (GstAudioDecoder * dec, gboolean enabled)
3598 {
3599   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3600 
3601   GST_OBJECT_LOCK (dec);
3602   dec->priv->needs_format = enabled;
3603   GST_OBJECT_UNLOCK (dec);
3604 }
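
/* Behaviour flags like this one are typically configured once in the
 * subclass' instance init; a minimal sketch (MyDec and my_dec_init are
 * assumed names):
 *
 *   static void
 *   my_dec_init (MyDec * self)
 *   {
 *     GstAudioDecoder *dec = GST_AUDIO_DECODER (self);
 *
 *     gst_audio_decoder_set_needs_format (dec, TRUE);
 *     gst_audio_decoder_set_drainable (dec, TRUE);
 *   }
 */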
3605 
3606 /**
3607  * gst_audio_decoder_get_needs_format:
3608  * @dec: a #GstAudioDecoder
3609  *
3610  * Queries decoder required format handling.
3611  *
3612  * Returns: TRUE if required format handling is enabled.
3613  *
3614  * MT safe.
3615  */
3616 gboolean
3617 gst_audio_decoder_get_needs_format (GstAudioDecoder * dec)
3618 {
3619   gboolean result;
3620 
3621   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3622 
3623   GST_OBJECT_LOCK (dec);
3624   result = dec->priv->needs_format;
3625   GST_OBJECT_UNLOCK (dec);
3626 
3627   return result;
3628 }
3629 
3630 /**
3631  * gst_audio_decoder_merge_tags:
3632  * @dec: a #GstAudioDecoder
3633  * @tags: (allow-none): a #GstTagList to merge, or NULL
3634  * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
3635  *
3636  * Sets the audio decoder tags and how they should be merged with any
3637  * upstream stream tags. This will override any tags previously set
3638  * with gst_audio_decoder_merge_tags().
3639  *
3640  * Note that this is provided for convenience, and the subclass is
3641  * not required to use this and can still do tag handling on its own.
3642  */
3643 void
3644 gst_audio_decoder_merge_tags (GstAudioDecoder * dec,
3645     const GstTagList * tags, GstTagMergeMode mode)
3646 {
3647   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3648   g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
3649   g_return_if_fail (mode != GST_TAG_MERGE_UNDEFINED);
3650 
3651   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3652   if (dec->priv->taglist != tags) {
3653     if (dec->priv->taglist) {
3654       gst_tag_list_unref (dec->priv->taglist);
3655       dec->priv->taglist = NULL;
3656       dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
3657     }
3658     if (tags) {
3659       dec->priv->taglist = gst_tag_list_ref ((GstTagList *) tags);
3660       dec->priv->decoder_tags_merge_mode = mode;
3661     }
3662 
3663     GST_DEBUG_OBJECT (dec, "setting decoder tags to %" GST_PTR_FORMAT, tags);
3664     dec->priv->taglist_changed = TRUE;
3665   }
3666   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3667 }
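
/* Sketch of how a subclass might publish codec tags once known (the tag
 * values here are made up for illustration):
 *
 *   GstTagList *tags = gst_tag_list_new (GST_TAG_AUDIO_CODEC, "Example",
 *       GST_TAG_BITRATE, (guint) 128000, NULL);
 *
 *   gst_audio_decoder_merge_tags (dec, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 */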
3668 
3669 /**
3670  * gst_audio_decoder_allocate_output_buffer:
3671  * @dec: a #GstAudioDecoder
3672  * @size: size of the buffer
3673  *
3674  * Helper function that allocates a buffer to hold an audio frame
3675  * for @dec's current output format.
3676  *
3677  * Returns: (transfer full): allocated buffer
3678  */
3679 GstBuffer *
3680 gst_audio_decoder_allocate_output_buffer (GstAudioDecoder * dec, gsize size)
3681 {
3682   GstBuffer *buffer = NULL;
3683   gboolean needs_reconfigure = FALSE;
3684 
3685   g_return_val_if_fail (size > 0, NULL);
3686 
3687   GST_DEBUG ("alloc src buffer");
3688 
3689   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3690 
3691   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
3692   if (G_UNLIKELY (dec->priv->ctx.output_format_changed ||
3693           (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)
3694               && needs_reconfigure))) {
3695     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
3696       GST_INFO_OBJECT (dec, "Failed to negotiate, fallback allocation");
3697       gst_pad_mark_reconfigure (dec->srcpad);
3698       goto fallback;
3699     }
3700   }
3701 
3702   buffer =
3703       gst_buffer_new_allocate (dec->priv->ctx.allocator, size,
3704       &dec->priv->ctx.params);
3705   if (!buffer) {
3706     GST_INFO_OBJECT (dec, "couldn't allocate output buffer");
3707     goto fallback;
3708   }
3709 
3710   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3711 
3712   return buffer;
3713 fallback:
3714   buffer = gst_buffer_new_allocate (NULL, size, NULL);
3715   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3716 
3717   return buffer;
3718 }
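
/* Typical use from a handle_frame implementation, as a minimal sketch
 * (decoded and decoded_size are assumed to come from the codec):
 *
 *   GstBuffer *out;
 *
 *   out = gst_audio_decoder_allocate_output_buffer (dec, decoded_size);
 *   gst_buffer_fill (out, 0, decoded, decoded_size);
 *   return gst_audio_decoder_finish_frame (dec, out, 1);
 */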
3719 
3720 /**
3721  * gst_audio_decoder_get_allocator:
3722  * @dec: a #GstAudioDecoder
3723  * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
3724  * used
3725  * @params: (out) (allow-none) (transfer full): the
3726  * #GstAllocationParams of @allocator
3727  *
3728  * Lets #GstAudioDecoder sub-classes know the memory @allocator
3729  * used by the base class and its @params.
3730  *
3731  * Unref the @allocator after use.
3732  */
3733 void
3734 gst_audio_decoder_get_allocator (GstAudioDecoder * dec,
3735     GstAllocator ** allocator, GstAllocationParams * params)
3736 {
3737   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3738 
3739   if (allocator)
3740     *allocator = dec->priv->ctx.allocator ?
3741         gst_object_ref (dec->priv->ctx.allocator) : NULL;
3742 
3743   if (params)
3744     *params = dec->priv->ctx.params;
3745 }
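
/* Sketch: a subclass can use the negotiated allocator to allocate buffers
 * itself instead of going through gst_audio_decoder_allocate_output_buffer()
 * (size is an assumed value):
 *
 *   GstAllocator *allocator = NULL;
 *   GstAllocationParams params;
 *   GstBuffer *buf;
 *
 *   gst_audio_decoder_get_allocator (dec, &allocator, &params);
 *   buf = gst_buffer_new_allocate (allocator, size, &params);
 *   if (allocator)
 *     gst_object_unref (allocator);
 */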
3746 
3747 /**
3748  * gst_audio_decoder_set_use_default_pad_acceptcaps:
3749  * @decoder: a #GstAudioDecoder
3750  * @use: if the default pad accept-caps query handling should be used
3751  *
3752  * Lets #GstAudioDecoder sub-classes decide if they want the sink pad
3753  * to use the default pad query handler to reply to accept-caps queries.
3754  *
3755  * By setting this to %TRUE, it is possible to further customize the default
3756  * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
3757  * %GST_PAD_SET_ACCEPT_TEMPLATE.
3758  *
3759  * Since: 1.6
3760  */
3761 void
3762 gst_audio_decoder_set_use_default_pad_acceptcaps (GstAudioDecoder * decoder,
3763     gboolean use)
3764 {
3765   decoder->priv->use_default_pad_acceptcaps = use;
3766 }
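
/* For instance, a subclass whose sink template caps fully describe what it
 * accepts could do the following in its instance init (self is the assumed
 * instance pointer):
 *
 *   gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER (self),
 *       TRUE);
 *   GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (self));
 */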
3767