1 /* GStreamer
2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 *
7 * gstmultiqueue.c:
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
18 *
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
23 */
24
25 /**
26 * SECTION:element-multiqueue
27 * @title: multiqueue
28 * @see_also: #GstQueue
29 *
30 * Multiqueue is similar to a normal #GstQueue with the following additional
31 * features:
32 *
33 * 1) Multiple streamhandling
34 *
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve such a feature it has request sink pads (sink%u) and
37 * 'sometimes' src pads (src%u). When requesting a given sinkpad with gst_element_request_pad(),
38 * the associated srcpad for that stream will be created.
39 * Example: requesting sink1 will generate src1.
40 *
41 * 2) Non-starvation on multiple stream
42 *
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream is risking data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
49 *
50 * 3) Non-linked srcpads graceful handling
51 *
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received
60 * before it.
61 *
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
66 * becomes available. #GstMultiQueue:extra-size-buffers,
67 *
68 *
69 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
70 * currently unused.
71 *
72 * The default queue size limits are 5 buffers, 10MB of data, or
73 * two second worth of data, whichever is reached first. Note that the number
74 * of buffers will dynamically grow depending on the fill level of
75 * other queues.
76 *
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
79 * queues is filled.
80 * Both signals are emitted from the context of the streaming thread.
81 *
82 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
83 * be throttled by the highest running-time of linked streams. This allows
84 * further relinking of those unlinked streams without them being in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have got different consumption requirements
87 * downstream (ex: video decoders which will consume more buffer (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
91 */
92
93 #ifdef HAVE_CONFIG_H
94 # include "config.h"
95 #endif
96
97 #include <gst/gst.h>
98 #include <stdio.h>
99 #include "gstmultiqueue.h"
100 #include <gst/glib-compat-private.h>
101
102 /**
103 * GstSingleQueue:
104 * @sinkpad: associated sink #GstPad
105 * @srcpad: associated source #GstPad
106 *
107 * Structure containing all information and properties about
108 * a single queue.
109 */
110 typedef struct _GstSingleQueue GstSingleQueue;
111
struct _GstSingleQueue
{
  /* unique identifier of the queue */
  guint id;
  /* group of streams to which this queue belongs to */
  guint groupid;
  /* NOTE(review): presumably the cached highest running-time of the group
   * this queue belongs to (cf. compute_high_time()) -- confirm */
  GstClockTimeDiff group_high_time;

  /* back-pointer to the owning multiqueue element */
  GstMultiQueue *mqueue;

  /* request sink pad and the matching sometimes src pad of this stream */
  GstPad *sinkpad;
  GstPad *srcpad;

  /* flowreturn of previous srcpad push */
  GstFlowReturn srcresult;
  /* If something was actually pushed on
   * this pad after flushing/pad activation
   * and the srcresult corresponds to something
   * real
   */
  gboolean pushed;

  /* segments */
  GstSegment sink_segment;
  GstSegment src_segment;
  gboolean has_src_segment;     /* preferred over initializing the src_segment to
                                 * UNDEFINED as this doesn't requires adding ifs
                                 * in every segment usage */

  /* position of src/sink */
  GstClockTimeDiff sinktime, srctime;
  /* cached input value, used for interleave */
  GstClockTimeDiff cached_sinktime;
  /* TRUE if either position needs to be recalculated */
  gboolean sink_tainted, src_tainted;

  /* queue of data */
  GstDataQueue *queue;
  /* current hard limits and (unused) emergency extra limits */
  GstDataQueueSize max_size, extra_size;
  /* amount of time currently queued */
  GstClockTime cur_time;
  gboolean is_eos;
  gboolean is_segment_done;
  gboolean is_sparse;
  gboolean flushing;
  gboolean active;

  /* Protected by global lock */
  guint32 nextid;               /* ID of the next object waiting to be pushed */
  guint32 oldid;                /* ID of the last object pushed (last in a series) */
  guint32 last_oldid;           /* Previously observed old_id, reset to MAXUINT32 on flush */
  GstClockTimeDiff next_time;   /* End running time of next buffer to be pushed */
  GstClockTimeDiff last_time;   /* Start running time of last pushed buffer */
  GCond turn;                   /* SingleQueue turn waiting conditional */

  /* for serialized queries */
  GCond query_handled;
  gboolean last_query;
  GstQuery *last_handled_query;

  /* For interleave calculation */
  GThread *thread;              /* Streaming thread of SingleQueue */
  GstClockTime interleave;      /* Calculated interleve within the thread */
};
175
176
177 /* Extension of GstDataQueueItem structure for our usage */
178 typedef struct _GstMultiQueueItem GstMultiQueueItem;
179
struct _GstMultiQueueItem
{
  /* the wrapped buffer/event/query */
  GstMiniObject *object;
  /* size in bytes accounted for this item */
  guint size;
  /* duration accounted for this item */
  guint64 duration;
  /* whether the item counts against the visible (buffer) limit */
  gboolean visible;

  /* destructor for this item */
  GDestroyNotify destroy;
  /* NOTE(review): presumably the global position id used to order pushes
   * across streams (cf. mqueue->counter / compute_high_id()) -- confirm */
  guint32 posid;

  /* TRUE if @object is a serialized query rather than data */
  gboolean is_query;
};
192
193 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
194 static void gst_single_queue_free (GstSingleQueue * squeue);
195
196 static void wake_up_next_non_linked (GstMultiQueue * mq);
197 static void compute_high_id (GstMultiQueue * mq);
198 static void compute_high_time (GstMultiQueue * mq, guint groupid);
199 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
200 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
201
202 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
203 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
204 static void recheck_buffering_status (GstMultiQueue * mq);
205
206 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
207
208 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
209
/* request sink pads: one per stream, named sink_%u */
static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
    GST_PAD_SINK,
    GST_PAD_REQUEST,
    GST_STATIC_CAPS_ANY);

/* sometimes src pads: created automatically when the matching sink pad
 * is requested (sink_N -> src_N) */
static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
    GST_PAD_SRC,
    GST_PAD_SOMETIMES,
    GST_STATIC_CAPS_ANY);
219
220 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
221 #define GST_CAT_DEFAULT (multi_queue_debug)
222
223 /* Signals and args */
enum
{
  SIGNAL_UNDERRUN,              /* all queues empty */
  SIGNAL_OVERRUN,               /* at least one queue full */
  LAST_SIGNAL
};
230
/* default limits, we try to keep up to 2 seconds of data and if there is not
 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
 * there is data in the queues. Normally, the byte and time limits are not hit
 * in theses conditions. */
/* Bodies are parenthesized so the macros expand safely inside larger
 * expressions (e.g. `x / DEFAULT_MAX_SIZE_BYTES`), matching the style
 * already used by DEFAULT_MINIMUM_INTERLEAVE below. */
#define DEFAULT_MAX_SIZE_BYTES (10 * 1024 * 1024)       /* 10 MB */
#define DEFAULT_MAX_SIZE_BUFFERS 5
#define DEFAULT_MAX_SIZE_TIME (2 * GST_SECOND)

/* second limits. When we hit one of the above limits we are probably dealing
 * with a badly muxed file and we scale the limits to these emergency values.
 * This is currently not yet implemented.
 * Since we dynamically scale the queue buffer size up to the limits but avoid
 * going above the max-size-buffers when we can, we don't really need this
 * additional extra size. */
#define DEFAULT_EXTRA_SIZE_BYTES (10 * 1024 * 1024)     /* 10 MB */
#define DEFAULT_EXTRA_SIZE_BUFFERS 5
#define DEFAULT_EXTRA_SIZE_TIME (3 * GST_SECOND)

#define DEFAULT_USE_BUFFERING FALSE
#define DEFAULT_LOW_WATERMARK 0.01
#define DEFAULT_HIGH_WATERMARK 0.99
#define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
#define DEFAULT_USE_INTERLEAVE FALSE
#define DEFAULT_UNLINKED_CACHE_TIME (250 * GST_MSECOND)

#define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
257
/* element property ids */
enum
{
  PROP_0,
  PROP_EXTRA_SIZE_BYTES,
  PROP_EXTRA_SIZE_BUFFERS,
  PROP_EXTRA_SIZE_TIME,
  PROP_MAX_SIZE_BYTES,
  PROP_MAX_SIZE_BUFFERS,
  PROP_MAX_SIZE_TIME,
  PROP_USE_BUFFERING,
  PROP_LOW_PERCENT,             /* deprecated in favour of low-watermark */
  PROP_HIGH_PERCENT,            /* deprecated in favour of high-watermark */
  PROP_LOW_WATERMARK,
  PROP_HIGH_WATERMARK,
  PROP_SYNC_BY_RUNNING_TIME,
  PROP_USE_INTERLEAVE,
  PROP_UNLINKED_CACHE_TIME,
  PROP_MINIMUM_INTERLEAVE,
  PROP_LAST
};
278
279 /* Explanation for buffer levels and percentages:
280 *
281 * The buffering_level functions here return a value in a normalized range
282 * that specifies the current fill level of a queue. The range goes from 0 to
283 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
284 *
285 * This is not to be confused with the buffering_percent value, which is
286 * a *relative* quantity - relative to the low/high watermarks.
287 * buffering_percent = 0% means overall buffering_level is at the low watermark.
288 * buffering_percent = 100% means overall buffering_level is at the high watermark.
289 * buffering_percent is used for determining if the fill level has reached
290 * the high watermark, and for producing BUFFERING messages. This value
291 * always uses a 0..100 range (since it is a percentage).
292 *
293 * To avoid future confusions, whenever "buffering level" is mentioned, it
294 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
295 * range. Whenever "buffering_percent" is mentioned, it refers to the
296 * percentage value that is relative to the low/high watermark. */
297
298 /* Using a buffering level range of 0..1000000 to allow for a
299 * resolution in ppm (1 ppm = 0.0001%) */
300 #define MAX_BUFFERING_LEVEL 1000000
301
302 /* How much 1% makes up in the buffer level range */
303 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
304
/* GstMultiQueuePad: pad subclass exposing the per-stream "group-id" property */

#define DEFAULT_PAD_GROUP_ID 0

enum
{
  PROP_PAD_0,
  PROP_PAD_GROUP_ID,
};

/* standard GObject cast/check macros for the pad subclass */
#define GST_TYPE_MULTIQUEUE_PAD            (gst_multiqueue_pad_get_type())
#define GST_MULTIQUEUE_PAD(obj)            (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
#define GST_IS_MULTIQUEUE_PAD(obj)         (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
#define GST_MULTIQUEUE_PAD_CLASS(klass)    (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
#define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
#define GST_MULTIQUEUE_PAD_GET_CLASS(obj)  (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))

struct _GstMultiQueuePad
{
  GstPad parent;

  /* the single queue this pad belongs to; may be NULL (checked before use
   * in the property accessors) */
  GstSingleQueue *sq;
};

struct _GstMultiQueuePadClass
{
  GstPadClass parent_class;
};

GType gst_multiqueue_pad_get_type (void);
335
336 G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
337 static void
gst_multiqueue_pad_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)338 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
339 GValue * value, GParamSpec * pspec)
340 {
341 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
342
343 switch (prop_id) {
344 case PROP_PAD_GROUP_ID:
345 if (pad->sq)
346 g_value_set_uint (value, pad->sq->groupid);
347 break;
348 default:
349 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
350 break;
351 }
352 }
353
354 static void
gst_multiqueue_pad_set_property(GObject * object,guint prop_id,const GValue * value,GParamSpec * pspec)355 gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
356 const GValue * value, GParamSpec * pspec)
357 {
358 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
359
360 switch (prop_id) {
361 case PROP_PAD_GROUP_ID:
362 GST_OBJECT_LOCK (pad);
363 if (pad->sq)
364 pad->sq->groupid = g_value_get_uint (value);
365 GST_OBJECT_UNLOCK (pad);
366 break;
367 default:
368 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
369 break;
370 }
371 }
372
373 static void
gst_multiqueue_pad_class_init(GstMultiQueuePadClass * klass)374 gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
375 {
376 GObjectClass *gobject_class = (GObjectClass *) klass;
377
378 gobject_class->set_property = gst_multiqueue_pad_set_property;
379 gobject_class->get_property = gst_multiqueue_pad_get_property;
380
381 /**
382 * GstMultiQueuePad:group-id:
383 *
384 * Group to which this pad belongs.
385 *
386 * Since: 1.10
387 */
388 g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
389 g_param_spec_uint ("group-id", "Group ID",
390 "Group to which this pad belongs", 0, G_MAXUINT32,
391 DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
392 }
393
static void
gst_multiqueue_pad_init (GstMultiQueuePad * pad)
{
  /* Nothing to initialize here: pad->sq starts out NULL and is
   * presumably attached when the owning single queue is created
   * (cf. gst_single_queue_new) -- the accessors NULL-check it. */
}
399
400
/* Lock/unlock the global multiqueue mutex protecting the queue list and
 * shared bookkeeping. */
#define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
  g_mutex_lock (&q->qlock); \
} G_STMT_END

#define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
  g_mutex_unlock (&q->qlock); \
} G_STMT_END

/* Record a new buffering percentage; only flags the change -- the actual
 * BUFFERING message is posted later (cf. gst_multi_queue_post_buffering,
 * called after the mutex is dropped). */
#define SET_PERCENT(mq, perc) G_STMT_START { \
  if (perc != mq->buffering_percent) { \
    mq->buffering_percent = perc; \
    mq->buffering_percent_changed = TRUE; \
    GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
  } \
} G_STMT_END
416
417 /* Convenience function */
418 static inline GstClockTimeDiff
my_segment_to_running_time(GstSegment * segment,GstClockTime val)419 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
420 {
421 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
422
423 if (GST_CLOCK_TIME_IS_VALID (val)) {
424 gboolean sign =
425 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
426 if (sign > 0)
427 res = val;
428 else if (sign < 0)
429 res = -val;
430 }
431 return res;
432 }
433
434 static void gst_multi_queue_finalize (GObject * object);
435 static void gst_multi_queue_set_property (GObject * object,
436 guint prop_id, const GValue * value, GParamSpec * pspec);
437 static void gst_multi_queue_get_property (GObject * object,
438 guint prop_id, GValue * value, GParamSpec * pspec);
439
440 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
441 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
442 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
443 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
444 element, GstStateChange transition);
445
446 static void gst_multi_queue_loop (GstPad * pad);
447
448 #define _do_init \
449 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
450 #define gst_multi_queue_parent_class parent_class
451 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
452 _do_init);
453
454 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
455
/* Class initializer: registers signals, properties, metadata, pad
 * templates and element vfuncs for GstMultiQueue. */
static void
gst_multi_queue_class_init (GstMultiQueueClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);

  gobject_class->set_property = gst_multi_queue_set_property;
  gobject_class->get_property = gst_multi_queue_get_property;

  /* SIGNALS */

  /**
   * GstMultiQueue::underrun:
   * @multiqueue: the multiqueue instance
   *
   * This signal is emitted from the streaming thread when there is
   * no data in any of the queues inside the multiqueue instance (underrun).
   *
   * This indicates either starvation or EOS from the upstream data sources.
   */
  gst_multi_queue_signals[SIGNAL_UNDERRUN] =
      g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
      G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
      g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);

  /**
   * GstMultiQueue::overrun:
   * @multiqueue: the multiqueue instance
   *
   * Reports that one of the queues in the multiqueue is full (overrun).
   * A queue is full if the total amount of data inside it (num-buffers, time,
   * size) is higher than the boundary values which can be set through the
   * GObject properties.
   *
   * This can be used as an indicator of pre-roll.
   */
  gst_multi_queue_signals[SIGNAL_OVERRUN] =
      g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
      G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
      g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);

  /* PROPERTIES */

  /* NOTE(review): the nick says "(kB)" but the limit is in bytes, as the
   * blurb states; kept as-is since the nick is user-visible metadata. */
  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
      g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
          "Max. amount of data in the queue (bytes, 0=disable)",
          0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
      g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
          "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
          DEFAULT_MAX_SIZE_BUFFERS,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
      g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
          "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
          DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  /* NOTE(review): same "(kB)" vs bytes mismatch in the nick here. */
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
      g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
          "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
      g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
          "Amount of buffers the queues can grow if one of them is empty (0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
      g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
          "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueue:use-buffering:
   *
   * Enable the buffering option in multiqueue so that BUFFERING messages are
   * emitted based on low-/high-percent thresholds.
   */
  g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
      g_param_spec_boolean ("use-buffering", "Use buffering",
          "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds",
          DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:low-percent:
   *
   * Low threshold percent for buffering to start.
   */
  g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
      g_param_spec_int ("low-percent", "Low percent",
          "Low threshold for buffering to start. Only used if use-buffering is True "
          "(Deprecated: use low-watermark instead)",
          0, 100, DEFAULT_LOW_WATERMARK * 100,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:high-percent:
   *
   * High threshold percent for buffering to finish.
   */
  g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
      g_param_spec_int ("high-percent", "High percent",
          "High threshold for buffering to finish. Only used if use-buffering is True "
          "(Deprecated: use high-watermark instead)",
          0, 100, DEFAULT_HIGH_WATERMARK * 100,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:low-watermark:
   *
   * Low threshold watermark for buffering to start.
   *
   * Since: 1.10
   */
  g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
      g_param_spec_double ("low-watermark", "Low watermark",
          "Low threshold for buffering to start. Only used if use-buffering is True",
          0.0, 1.0, DEFAULT_LOW_WATERMARK,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:high-watermark:
   *
   * High threshold watermark for buffering to finish.
   *
   * Since: 1.10
   */
  g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
      g_param_spec_double ("high-watermark", "High watermark",
          "High threshold for buffering to finish. Only used if use-buffering is True",
          0.0, 1.0, DEFAULT_HIGH_WATERMARK,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueue:sync-by-running-time:
   *
   * If enabled multiqueue will synchronize deactivated or not-linked streams
   * to the activated and linked streams by taking the running time.
   * Otherwise multiqueue will synchronize the deactivated or not-linked
   * streams by keeping the order in which buffers and events arrived compared
   * to active and linked streams.
   */
  g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
      g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
          "Synchronize deactivated or not-linked streams by running time",
          DEFAULT_SYNC_BY_RUNNING_TIME,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
      g_param_spec_boolean ("use-interleave", "Use interleave",
          "Adjust time limits based on input interleave",
          DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
      g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
          "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
          0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
      g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
          "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
          0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  gobject_class->finalize = gst_multi_queue_finalize;

  gst_element_class_set_static_metadata (gstelement_class,
      "MultiQueue",
      "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
  /* sink pads use the GstMultiQueuePad subclass so the per-pad "group-id"
   * property is available; src pads are plain pads */
  gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
      &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
  gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);

  gstelement_class->request_new_pad =
      GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
  gstelement_class->release_pad =
      GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);
}
644
645 static void
gst_multi_queue_init(GstMultiQueue * mqueue)646 gst_multi_queue_init (GstMultiQueue * mqueue)
647 {
648 mqueue->nbqueues = 0;
649 mqueue->queues = NULL;
650
651 mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
652 mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
653 mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;
654
655 mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
656 mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
657 mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;
658
659 mqueue->use_buffering = DEFAULT_USE_BUFFERING;
660 mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
661 mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;
662
663 mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
664 mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
665 mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
666 mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;
667
668 mqueue->counter = 1;
669 mqueue->highid = -1;
670 mqueue->high_time = GST_CLOCK_STIME_NONE;
671
672 g_mutex_init (&mqueue->qlock);
673 g_mutex_init (&mqueue->buffering_post_lock);
674 }
675
676 static void
gst_multi_queue_finalize(GObject * object)677 gst_multi_queue_finalize (GObject * object)
678 {
679 GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);
680
681 g_list_foreach (mqueue->queues, (GFunc) gst_single_queue_free, NULL);
682 g_list_free (mqueue->queues);
683 mqueue->queues = NULL;
684 mqueue->queues_cookie++;
685
686 /* free/unref instance data */
687 g_mutex_clear (&mqueue->qlock);
688 g_mutex_clear (&mqueue->buffering_post_lock);
689
690 G_OBJECT_CLASS (parent_class)->finalize (object);
691 }
692
/* Propagate the element-level max_size.<format> limit to every single
 * queue, re-evaluating each queue's buffering state and data-queue limits.
 * Caller must hold the multiqueue mutex (all call sites do).
 * The stray `;` that followed the while-loop's closing brace is removed. */
#define SET_CHILD_PROPERTY(mq,format) G_STMT_START {    \
    GList * tmp = mq->queues;                           \
    while (tmp) {                                       \
      GstSingleQueue *q = (GstSingleQueue*)tmp->data;   \
      q->max_size.format = mq->max_size.format;         \
      update_buffering (mq, q);                         \
      gst_data_queue_limits_changed (q->queue);         \
      tmp = g_list_next(tmp);                           \
    }                                                   \
} G_STMT_END
703
/* GObject setter for all GstMultiQueue element properties.
 *
 * Limit changes (max-size-*) are applied under the multiqueue mutex,
 * propagated to every single queue, and followed by a buffering-message
 * post once the mutex is released. */
static void
gst_multi_queue_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstMultiQueue *mq = GST_MULTI_QUEUE (object);

  switch (prop_id) {
    case PROP_MAX_SIZE_BYTES:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->max_size.bytes = g_value_get_uint (value);
      SET_CHILD_PROPERTY (mq, bytes);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      /* posted outside the lock to avoid posting a bus message while
       * holding the queue mutex */
      gst_multi_queue_post_buffering (mq);
      break;
    case PROP_MAX_SIZE_BUFFERS:
    {
      GList *tmp;
      gint new_size = g_value_get_uint (value);

      GST_MULTI_QUEUE_MUTEX_LOCK (mq);

      mq->max_size.visible = new_size;

      /* Unlike bytes/time, the buffer limit is applied per queue with
       * care: a queue that dynamically grew must not be shrunk below its
       * current fill level. */
      tmp = mq->queues;
      while (tmp) {
        GstDataQueueSize size;
        GstSingleQueue *q = (GstSingleQueue *) tmp->data;
        gst_data_queue_get_level (q->queue, &size);

        GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
            " current: %d, current max %d", q->id, new_size, size.visible,
            q->max_size.visible);

        /* do not reduce max size below current level if the single queue
         * has grown because of empty queue */
        if (new_size == 0) {
          q->max_size.visible = new_size;
        } else if (q->max_size.visible == 0) {
          q->max_size.visible = MAX (new_size, size.visible);
        } else if (new_size > size.visible) {
          q->max_size.visible = new_size;
        }
        update_buffering (mq, q);
        gst_data_queue_limits_changed (q->queue);
        tmp = g_list_next (tmp);
      }

      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);

      break;
    }
    case PROP_MAX_SIZE_TIME:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->max_size.time = g_value_get_uint64 (value);
      SET_CHILD_PROPERTY (mq, time);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);
      break;
    /* NOTE(review): the extra-size and boolean properties below are
     * written without taking the multiqueue mutex -- presumably fine
     * because they are word-sized and the extra sizes are unused, but
     * worth confirming. */
    case PROP_EXTRA_SIZE_BYTES:
      mq->extra_size.bytes = g_value_get_uint (value);
      break;
    case PROP_EXTRA_SIZE_BUFFERS:
      mq->extra_size.visible = g_value_get_uint (value);
      break;
    case PROP_EXTRA_SIZE_TIME:
      mq->extra_size.time = g_value_get_uint64 (value);
      break;
    case PROP_USE_BUFFERING:
      mq->use_buffering = g_value_get_boolean (value);
      recheck_buffering_status (mq);
      break;
    case PROP_LOW_PERCENT:
      /* deprecated alias: percent is mapped onto the watermark scale */
      mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
      /* Recheck buffering status - the new low_watermark value might
       * be above the current fill level. If the old low_watermark one
       * was below the current level, this means that mq->buffering is
       * disabled and needs to be re-enabled. */
      recheck_buffering_status (mq);
      break;
    case PROP_HIGH_PERCENT:
      mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
      recheck_buffering_status (mq);
      break;
    case PROP_LOW_WATERMARK:
      mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
      recheck_buffering_status (mq);
      break;
    case PROP_HIGH_WATERMARK:
      mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
      recheck_buffering_status (mq);
      break;
    case PROP_SYNC_BY_RUNNING_TIME:
      mq->sync_by_running_time = g_value_get_boolean (value);
      break;
    case PROP_USE_INTERLEAVE:
      mq->use_interleave = g_value_get_boolean (value);
      break;
    case PROP_UNLINKED_CACHE_TIME:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->unlinked_cache_time = g_value_get_uint64 (value);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);
      break;
    case PROP_MINIMUM_INTERLEAVE:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->min_interleave_time = g_value_get_uint64 (value);
      /* a new floor may change the effective interleave immediately */
      if (mq->use_interleave)
        calculate_interleave (mq, NULL);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
820
821 static void
gst_multi_queue_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)822 gst_multi_queue_get_property (GObject * object, guint prop_id,
823 GValue * value, GParamSpec * pspec)
824 {
825 GstMultiQueue *mq = GST_MULTI_QUEUE (object);
826
827 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
828
829 switch (prop_id) {
830 case PROP_EXTRA_SIZE_BYTES:
831 g_value_set_uint (value, mq->extra_size.bytes);
832 break;
833 case PROP_EXTRA_SIZE_BUFFERS:
834 g_value_set_uint (value, mq->extra_size.visible);
835 break;
836 case PROP_EXTRA_SIZE_TIME:
837 g_value_set_uint64 (value, mq->extra_size.time);
838 break;
839 case PROP_MAX_SIZE_BYTES:
840 g_value_set_uint (value, mq->max_size.bytes);
841 break;
842 case PROP_MAX_SIZE_BUFFERS:
843 g_value_set_uint (value, mq->max_size.visible);
844 break;
845 case PROP_MAX_SIZE_TIME:
846 g_value_set_uint64 (value, mq->max_size.time);
847 break;
848 case PROP_USE_BUFFERING:
849 g_value_set_boolean (value, mq->use_buffering);
850 break;
851 case PROP_LOW_PERCENT:
852 g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
853 break;
854 case PROP_HIGH_PERCENT:
855 g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
856 break;
857 case PROP_LOW_WATERMARK:
858 g_value_set_double (value, mq->low_watermark /
859 (gdouble) MAX_BUFFERING_LEVEL);
860 break;
861 case PROP_HIGH_WATERMARK:
862 g_value_set_double (value, mq->high_watermark /
863 (gdouble) MAX_BUFFERING_LEVEL);
864 break;
865 case PROP_SYNC_BY_RUNNING_TIME:
866 g_value_set_boolean (value, mq->sync_by_running_time);
867 break;
868 case PROP_USE_INTERLEAVE:
869 g_value_set_boolean (value, mq->use_interleave);
870 break;
871 case PROP_UNLINKED_CACHE_TIME:
872 g_value_set_uint64 (value, mq->unlinked_cache_time);
873 break;
874 case PROP_MINIMUM_INTERLEAVE:
875 g_value_set_uint64 (value, mq->min_interleave_time);
876 break;
877 default:
878 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
879 break;
880 }
881
882 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
883 }
884
885 static GstIterator *
gst_multi_queue_iterate_internal_links(GstPad * pad,GstObject * parent)886 gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
887 {
888 GstIterator *it = NULL;
889 GstPad *opad;
890 GstSingleQueue *squeue;
891 GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
892 GValue val = { 0, };
893
894 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
895 squeue = gst_pad_get_element_private (pad);
896 if (!squeue)
897 goto out;
898
899 if (squeue->sinkpad == pad)
900 opad = gst_object_ref (squeue->srcpad);
901 else if (squeue->srcpad == pad)
902 opad = gst_object_ref (squeue->sinkpad);
903 else
904 goto out;
905
906 g_value_init (&val, GST_TYPE_PAD);
907 g_value_set_object (&val, opad);
908 it = gst_iterator_new_single (GST_TYPE_PAD, &val);
909 g_value_unset (&val);
910
911 gst_object_unref (opad);
912
913 out:
914 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
915
916 return it;
917 }
918
919
920 /*
921 * GstElement methods
922 */
923
924 static GstPad *
gst_multi_queue_request_new_pad(GstElement * element,GstPadTemplate * temp,const gchar * name,const GstCaps * caps)925 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
926 const gchar * name, const GstCaps * caps)
927 {
928 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
929 GstSingleQueue *squeue;
930 GstPad *new_pad;
931 guint temp_id = -1;
932
933 if (name) {
934 sscanf (name + 4, "_%u", &temp_id);
935 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
936 }
937
938 /* Create a new single queue, add the sink and source pad and return the sink pad */
939 squeue = gst_single_queue_new (mqueue, temp_id);
940
941 new_pad = squeue ? squeue->sinkpad : NULL;
942
943 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
944
945 return new_pad;
946 }
947
948 static void
gst_multi_queue_release_pad(GstElement * element,GstPad * pad)949 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
950 {
951 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
952 GstSingleQueue *sq = NULL;
953 GList *tmp;
954
955 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
956
957 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
958 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
959 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
960 sq = (GstSingleQueue *) tmp->data;
961
962 if (sq->sinkpad == pad)
963 break;
964 }
965
966 if (!tmp) {
967 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
968 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
969 return;
970 }
971
972 /* FIXME: The removal of the singlequeue should probably not happen until it
973 * finishes draining */
974
975 /* remove it from the list */
976 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
977 mqueue->queues_cookie++;
978
979 /* FIXME : recompute next-non-linked */
980 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
981
982 /* delete SingleQueue */
983 gst_data_queue_set_flushing (sq->queue, TRUE);
984
985 gst_pad_set_active (sq->srcpad, FALSE);
986 gst_pad_set_active (sq->sinkpad, FALSE);
987 gst_pad_set_element_private (sq->srcpad, NULL);
988 gst_pad_set_element_private (sq->sinkpad, NULL);
989 gst_element_remove_pad (element, sq->srcpad);
990 gst_element_remove_pad (element, sq->sinkpad);
991 gst_single_queue_free (sq);
992 }
993
994 static GstStateChangeReturn
gst_multi_queue_change_state(GstElement * element,GstStateChange transition)995 gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
996 {
997 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
998 GstSingleQueue *sq = NULL;
999 GstStateChangeReturn result;
1000
1001 switch (transition) {
1002 case GST_STATE_CHANGE_READY_TO_PAUSED:{
1003 GList *tmp;
1004
1005 /* Set all pads to non-flushing */
1006 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1007 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1008 sq = (GstSingleQueue *) tmp->data;
1009 sq->flushing = FALSE;
1010 }
1011
1012 /* the visible limit might not have been set on single queues that have grown because of other queueus were empty */
1013 SET_CHILD_PROPERTY (mqueue, visible);
1014
1015 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1016 gst_multi_queue_post_buffering (mqueue);
1017
1018 break;
1019 }
1020 case GST_STATE_CHANGE_PAUSED_TO_READY:{
1021 GList *tmp;
1022
1023 /* Un-wait all waiting pads */
1024 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1025 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1026 sq = (GstSingleQueue *) tmp->data;
1027 sq->flushing = TRUE;
1028 g_cond_signal (&sq->turn);
1029
1030 sq->last_query = FALSE;
1031 g_cond_signal (&sq->query_handled);
1032 }
1033 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1034 break;
1035 }
1036 default:
1037 break;
1038 }
1039
1040 result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1041
1042 switch (transition) {
1043 default:
1044 break;
1045 }
1046
1047 return result;
1048 }
1049
1050 static gboolean
gst_single_queue_start(GstMultiQueue * mq,GstSingleQueue * sq)1051 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1052 {
1053 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1054 return gst_pad_start_task (sq->srcpad,
1055 (GstTaskFunction) gst_multi_queue_loop, sq->srcpad, NULL);
1056 }
1057
1058 static gboolean
gst_single_queue_pause(GstMultiQueue * mq,GstSingleQueue * sq)1059 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1060 {
1061 gboolean result;
1062
1063 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1064 result = gst_pad_pause_task (sq->srcpad);
1065 sq->sink_tainted = sq->src_tainted = TRUE;
1066 return result;
1067 }
1068
1069 static gboolean
gst_single_queue_stop(GstMultiQueue * mq,GstSingleQueue * sq)1070 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1071 {
1072 gboolean result;
1073
1074 GST_LOG_OBJECT (mq, "SingleQueue %d : stopping task", sq->id);
1075 result = gst_pad_stop_task (sq->srcpad);
1076 sq->sink_tainted = sq->src_tainted = TRUE;
1077 return result;
1078 }
1079
/* Start (@flush == TRUE) or stop (@flush == FALSE) flushing on one single
 * queue.  Starting puts the queue in flushing mode and wakes up anything
 * blocked on it; stopping drops the queued data and resets all streaming
 * bookkeeping so a fresh run can begin.  @full is forwarded to
 * gst_single_queue_flush_queue() and controls how much gets dropped. */
static void
gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
    gboolean full)
{
  GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
      sq->id);

  if (flush) {
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    /* make further pushes fail and the dataqueue reject data */
    sq->srcresult = GST_FLOW_FLUSHING;
    gst_data_queue_set_flushing (sq->queue, TRUE);

    sq->flushing = TRUE;

    /* wake up non-linked task */
    GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
        sq->id);
    g_cond_signal (&sq->turn);
    /* also release a thread blocked on a serialized query, reporting failure */
    sq->last_query = FALSE;
    g_cond_signal (&sq->query_handled);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  } else {
    /* drop the queued items first, outside the multiqueue lock */
    gst_single_queue_flush_queue (sq, full);

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
    gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
    sq->has_src_segment = FALSE;
    /* All pads start off not-linked for a smooth kick-off */
    sq->srcresult = GST_FLOW_OK;
    sq->pushed = FALSE;
    sq->cur_time = 0;
    sq->max_size.visible = mq->max_size.visible;
    sq->is_eos = FALSE;
    sq->is_segment_done = FALSE;
    /* reset the id/time tracking used for (non-)linked pad scheduling */
    sq->nextid = 0;
    sq->oldid = 0;
    sq->last_oldid = G_MAXUINT32;
    sq->next_time = GST_CLOCK_STIME_NONE;
    sq->last_time = GST_CLOCK_STIME_NONE;
    sq->cached_sinktime = GST_CLOCK_STIME_NONE;
    sq->group_high_time = GST_CLOCK_STIME_NONE;
    gst_data_queue_set_flushing (sq->queue, FALSE);

    /* We will become active again on the next buffer/gap */
    sq->active = FALSE;

    /* Reset high time to be recomputed next */
    mq->high_time = GST_CLOCK_STIME_NONE;

    /* only clear the flushing flag last, once everything is consistent */
    sq->flushing = FALSE;
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }
}
1134
1135 /* WITH LOCK TAKEN */
1136 static gint
get_buffering_level(GstSingleQueue * sq)1137 get_buffering_level (GstSingleQueue * sq)
1138 {
1139 GstDataQueueSize size;
1140 gint buffering_level, tmp;
1141
1142 gst_data_queue_get_level (sq->queue, &size);
1143
1144 GST_DEBUG_OBJECT (sq->mqueue,
1145 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
1146 G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
1147 size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);
1148
1149 /* get bytes and time buffer levels and take the max */
1150 if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
1151 || sq->is_sparse) {
1152 buffering_level = MAX_BUFFERING_LEVEL;
1153 } else {
1154 buffering_level = 0;
1155 if (sq->max_size.time > 0) {
1156 tmp =
1157 gst_util_uint64_scale (sq->cur_time,
1158 MAX_BUFFERING_LEVEL, sq->max_size.time);
1159 buffering_level = MAX (buffering_level, tmp);
1160 }
1161 if (sq->max_size.bytes > 0) {
1162 tmp =
1163 gst_util_uint64_scale_int (size.bytes,
1164 MAX_BUFFERING_LEVEL, sq->max_size.bytes);
1165 buffering_level = MAX (buffering_level, tmp);
1166 }
1167 }
1168
1169 return buffering_level;
1170 }
1171
1172 /* WITH LOCK TAKEN */
1173 static void
update_buffering(GstMultiQueue * mq,GstSingleQueue * sq)1174 update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
1175 {
1176 gint buffering_level, percent;
1177
1178 /* nothing to dowhen we are not in buffering mode */
1179 if (!mq->use_buffering)
1180 return;
1181
1182 buffering_level = get_buffering_level (sq);
1183
1184 /* scale so that if buffering_level equals the high watermark,
1185 * the percentage is 100% */
1186 percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
1187 /* clip */
1188 if (percent > 100)
1189 percent = 100;
1190
1191 if (mq->buffering) {
1192 if (buffering_level >= mq->high_watermark) {
1193 mq->buffering = FALSE;
1194 }
1195 /* make sure it increases */
1196 percent = MAX (mq->buffering_percent, percent);
1197
1198 SET_PERCENT (mq, percent);
1199 } else {
1200 GList *iter;
1201 gboolean is_buffering = TRUE;
1202
1203 for (iter = mq->queues; iter; iter = g_list_next (iter)) {
1204 GstSingleQueue *oq = (GstSingleQueue *) iter->data;
1205
1206 if (get_buffering_level (oq) >= mq->high_watermark) {
1207 is_buffering = FALSE;
1208
1209 break;
1210 }
1211 }
1212
1213 if (is_buffering && buffering_level < mq->low_watermark) {
1214 mq->buffering = TRUE;
1215 SET_PERCENT (mq, percent);
1216 }
1217 }
1218 }
1219
1220 static void
gst_multi_queue_post_buffering(GstMultiQueue * mq)1221 gst_multi_queue_post_buffering (GstMultiQueue * mq)
1222 {
1223 GstMessage *msg = NULL;
1224
1225 g_mutex_lock (&mq->buffering_post_lock);
1226 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1227 if (mq->buffering_percent_changed) {
1228 gint percent = mq->buffering_percent;
1229
1230 mq->buffering_percent_changed = FALSE;
1231
1232 GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
1233 msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
1234 }
1235 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1236
1237 if (msg != NULL)
1238 gst_element_post_message (GST_ELEMENT_CAST (mq), msg);
1239
1240 g_mutex_unlock (&mq->buffering_post_lock);
1241 }
1242
1243 static void
recheck_buffering_status(GstMultiQueue * mq)1244 recheck_buffering_status (GstMultiQueue * mq)
1245 {
1246 if (!mq->use_buffering && mq->buffering) {
1247 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1248 mq->buffering = FALSE;
1249 GST_DEBUG_OBJECT (mq,
1250 "Buffering property disabled, but queue was still buffering; "
1251 "setting buffering percentage to 100%%");
1252 SET_PERCENT (mq, 100);
1253 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1254 }
1255
1256 if (mq->use_buffering) {
1257 GList *tmp;
1258 gint old_perc;
1259
1260 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1261
1262 /* force buffering percentage to be recalculated */
1263 old_perc = mq->buffering_percent;
1264 mq->buffering_percent = 0;
1265
1266 tmp = mq->queues;
1267 while (tmp) {
1268 GstSingleQueue *q = (GstSingleQueue *) tmp->data;
1269 update_buffering (mq, q);
1270 gst_data_queue_limits_changed (q->queue);
1271 tmp = g_list_next (tmp);
1272 }
1273
1274 GST_DEBUG_OBJECT (mq,
1275 "Recalculated buffering percentage: old: %d%% new: %d%%",
1276 old_perc, mq->buffering_percent);
1277
1278 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1279 }
1280
1281 gst_multi_queue_post_buffering (mq);
1282 }
1283
/* Recompute mq->interleave: the extra amount of buffering time needed so
 * that streams whose input timestamps are offset against each other can
 * all keep receiving data.  @sq is the queue whose cached sinktime just
 * changed, or NULL (e.g. when only the minimum-interleave property
 * changed).  Called with the multiqueue lock taken — see callers. */
static void
calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
{
  GstClockTimeDiff low, high;
  GstClockTime interleave, other_interleave = 0;
  GList *tmp;

  low = high = GST_CLOCK_STIME_NONE;
  interleave = mq->interleave;
  /* Go over all single queues and calculate lowest/highest value */
  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
    /* Ignore sparse streams for interleave calculation */
    if (oq->is_sparse)
      continue;
    /* If a stream is not active yet (hasn't received any buffers), set
     * a maximum interleave to allow it to receive more data */
    if (!oq->active) {
      GST_LOG_OBJECT (mq,
          "queue %d is not active yet, forcing interleave to 5s", oq->id);
      mq->interleave = 5 * GST_SECOND;
      /* Update max-size time */
      mq->max_size.time = mq->interleave;
      SET_CHILD_PROPERTY (mq, time);
      goto beach;
    }

    /* Calculate within each streaming thread */
    if (sq && sq->thread != oq->thread) {
      /* queues served by another streaming thread only contribute their
       * own last-computed interleave value */
      if (oq->interleave > other_interleave)
        other_interleave = oq->interleave;
      continue;
    }

    if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime)) {
      if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
        low = oq->cached_sinktime;
      if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
        high = oq->cached_sinktime;
    }
    GST_LOG_OBJECT (mq,
        "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
        " high:%" GST_STIME_FORMAT, oq->id,
        GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
        GST_STIME_ARGS (high));
  }

  if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
    interleave = high - low;
    /* Padding of interleave and minimum value: 50% headroom on top of the
     * observed spread, plus the configured minimum */
    interleave = (150 * interleave / 100) + mq->min_interleave_time;
    if (sq)
      sq->interleave = interleave;

    interleave = MAX (interleave, other_interleave);

    /* Update the stored interleave if:
     * * No data has arrived yet (high == low)
     * * Or it went higher
     * * Or it went lower and we've gone past the previous interleave needed */
    if (high == low || interleave > mq->interleave ||
        ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
                    mq->interleave)) < low)
            && interleave < (mq->interleave * 3 / 4))) {
      /* Update the interleave */
      mq->interleave = interleave;
      mq->last_interleave_update = high;
      /* Update max-size time */
      mq->max_size.time = mq->interleave;
      SET_CHILD_PROPERTY (mq, time);
    }
  }

beach:
  GST_DEBUG_OBJECT (mq,
      "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
      GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
      " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
      GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
      GST_TIME_ARGS (mq->interleave),
      GST_STIME_ARGS (mq->last_interleave_update));
}
1366
1367
/* calculate the diff between running time on the sink and src of the queue.
 * This is the total amount of time in the queue.
 * The sink/src "tainted" flags mark that the respective segment position
 * changed since the last computation, so the running time needs to be
 * re-derived here.
 * WITH LOCK TAKEN */
static void
update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
{
  GstClockTimeDiff sink_time, src_time;

  if (sq->sink_tainted) {
    /* the sink position moved: re-map it to running time and cache it */
    sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
        sq->sink_segment.position);

    GST_DEBUG_OBJECT (mq,
        "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
        GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
        GST_STIME_ARGS (sink_time));

    if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
      /* If the single queue still doesn't have a last_time set, this means
       * that nothing has been pushed out yet.
       * In order for the high_time computation to be as efficient as possible,
       * we set the last_time */
      sq->last_time = sink_time;
    }
    if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
      /* if we have a time, we become untainted and use the time */
      sq->sink_tainted = FALSE;
      if (mq->use_interleave) {
        sq->cached_sinktime = sink_time;
        calculate_interleave (mq, sq);
      }
    }
  } else
    sink_time = sq->sinktime;

  if (sq->src_tainted) {
    GstSegment *segment;
    gint64 position;

    if (sq->has_src_segment) {
      segment = &sq->src_segment;
      position = sq->src_segment.position;
    } else {
      /*
       * If the src pad had no segment yet, use the sink segment
       * to avoid signalling overrun if the received sink segment has a
       * a position > max-size-time while the src pad time would be the default=0
       *
       * This can happen when switching pads on chained/adaptive streams and the
       * new chain has a segment with a much larger position
       */
      segment = &sq->sink_segment;
      position = sq->sink_segment.position;
    }

    src_time = sq->srctime = my_segment_to_running_time (segment, position);
    /* if we have a time, we become untainted and use the time */
    if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
      sq->src_tainted = FALSE;
    }
  } else
    src_time = sq->srctime;

  GST_DEBUG_OBJECT (mq,
      "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
      GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));

  /* This allows for streams with out of order timestamping - sometimes the
   * emerging timestamp is later than the arriving one(s) */
  if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
          GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
    sq->cur_time = sink_time - src_time;
  else
    sq->cur_time = 0;

  /* updating the time level can change the buffering state */
  update_buffering (mq, sq);

  return;
}
1448
1449 /* take a SEGMENT event and apply the values to segment, updating the time
1450 * level of queue. */
1451 static void
apply_segment(GstMultiQueue * mq,GstSingleQueue * sq,GstEvent * event,GstSegment * segment)1452 apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1453 GstSegment * segment)
1454 {
1455 gst_event_copy_segment (event, segment);
1456
1457 /* now configure the values, we use these to track timestamps on the
1458 * sinkpad. */
1459 if (segment->format != GST_FORMAT_TIME) {
1460 /* non-time format, pretent the current time segment is closed with a
1461 * 0 start and unknown stop time. */
1462 segment->format = GST_FORMAT_TIME;
1463 segment->start = 0;
1464 segment->stop = -1;
1465 segment->time = 0;
1466 }
1467 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1468
1469 /* Make sure we have a valid initial segment position (and not garbage
1470 * from upstream) */
1471 if (segment->rate > 0.0)
1472 segment->position = segment->start;
1473 else
1474 segment->position = segment->stop;
1475 if (segment == &sq->sink_segment)
1476 sq->sink_tainted = TRUE;
1477 else {
1478 sq->has_src_segment = TRUE;
1479 sq->src_tainted = TRUE;
1480 }
1481
1482 GST_DEBUG_OBJECT (mq,
1483 "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);
1484
1485 /* segment can update the time level of the queue */
1486 update_time_level (mq, sq);
1487
1488 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1489 gst_multi_queue_post_buffering (mq);
1490 }
1491
1492 /* take a buffer and update segment, updating the time level of the queue. */
1493 static void
apply_buffer(GstMultiQueue * mq,GstSingleQueue * sq,GstClockTime timestamp,GstClockTime duration,GstSegment * segment)1494 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1495 GstClockTime duration, GstSegment * segment)
1496 {
1497 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1498
1499 /* if no timestamp is set, assume it's continuous with the previous
1500 * time */
1501 if (timestamp == GST_CLOCK_TIME_NONE)
1502 timestamp = segment->position;
1503
1504 /* add duration */
1505 if (duration != GST_CLOCK_TIME_NONE)
1506 timestamp += duration;
1507
1508 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1509 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1510 GST_TIME_ARGS (timestamp));
1511
1512 segment->position = timestamp;
1513
1514 if (segment == &sq->sink_segment)
1515 sq->sink_tainted = TRUE;
1516 else
1517 sq->src_tainted = TRUE;
1518
1519 /* calc diff with other end */
1520 update_time_level (mq, sq);
1521 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1522 gst_multi_queue_post_buffering (mq);
1523 }
1524
1525 static void
apply_gap(GstMultiQueue * mq,GstSingleQueue * sq,GstEvent * event,GstSegment * segment)1526 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1527 GstSegment * segment)
1528 {
1529 GstClockTime timestamp;
1530 GstClockTime duration;
1531
1532 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1533
1534 gst_event_parse_gap (event, ×tamp, &duration);
1535
1536 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1537
1538 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1539 timestamp += duration;
1540 }
1541
1542 segment->position = timestamp;
1543
1544 if (segment == &sq->sink_segment)
1545 sq->sink_tainted = TRUE;
1546 else
1547 sq->src_tainted = TRUE;
1548
1549 /* calc diff with other end */
1550 update_time_level (mq, sq);
1551 }
1552
1553 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1554 gst_multi_queue_post_buffering (mq);
1555 }
1556
1557 static GstClockTimeDiff
get_running_time(GstSegment * segment,GstMiniObject * object,gboolean end)1558 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1559 {
1560 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1561
1562 if (GST_IS_BUFFER (object)) {
1563 GstBuffer *buf = GST_BUFFER_CAST (object);
1564 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1565
1566 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1567 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1568 btime += GST_BUFFER_DURATION (buf);
1569 time = my_segment_to_running_time (segment, btime);
1570 }
1571 } else if (GST_IS_BUFFER_LIST (object)) {
1572 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
1573 gint i, n;
1574 GstBuffer *buf;
1575
1576 n = gst_buffer_list_length (list);
1577 for (i = 0; i < n; i++) {
1578 GstClockTime btime;
1579 buf = gst_buffer_list_get (list, i);
1580 btime = GST_BUFFER_DTS_OR_PTS (buf);
1581 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1582 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1583 btime += GST_BUFFER_DURATION (buf);
1584 time = my_segment_to_running_time (segment, btime);
1585 if (!end)
1586 goto done;
1587 } else if (!end) {
1588 goto done;
1589 }
1590 }
1591 } else if (GST_IS_EVENT (object)) {
1592 GstEvent *event = GST_EVENT_CAST (object);
1593
1594 /* For newsegment events return the running time of the start position */
1595 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1596 const GstSegment *new_segment;
1597
1598 gst_event_parse_segment (event, &new_segment);
1599 if (new_segment->format == GST_FORMAT_TIME) {
1600 time =
1601 my_segment_to_running_time ((GstSegment *) new_segment,
1602 new_segment->start);
1603 }
1604 }
1605 }
1606
1607 done:
1608 return time;
1609 }
1610
/* Push a single popped item (buffer, event or query) out of @sq's source
 * pad and return the flow result the caller should store as sq->srcresult.
 * *@allow_drop is TRUE while the caller is discarding data after an error;
 * it is cleared here when an event that re-synchronizes the stream
 * (SEGMENT_DONE, EOS, STREAM_START, SEGMENT) is reached. */
static GstFlowReturn
gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
    GstMiniObject * object, gboolean * allow_drop)
{
  GstFlowReturn result = sq->srcresult;

  if (GST_IS_BUFFER (object)) {
    GstBuffer *buffer;
    GstClockTime timestamp, duration;

    buffer = GST_BUFFER_CAST (object);
    timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
    duration = GST_BUFFER_DURATION (buffer);

    /* update the src-side time level with this buffer's position */
    apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);

    /* Applying the buffer may have made the queue non-full again, unblock it if needed */
    gst_data_queue_limits_changed (sq->queue);

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
          sq->id, buffer, GST_TIME_ARGS (timestamp));
      gst_buffer_unref (buffer);
    } else {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
          sq->id, buffer, GST_TIME_ARGS (timestamp));
      result = gst_pad_push (sq->srcpad, buffer);
    }
  } else if (GST_IS_EVENT (object)) {
    GstEvent *event;

    event = GST_EVENT_CAST (object);

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_SEGMENT_DONE:
        /* segment finished: stop dropping */
        *allow_drop = FALSE;
        break;
      case GST_EVENT_EOS:
        result = GST_FLOW_EOS;
        if (G_UNLIKELY (*allow_drop))
          *allow_drop = FALSE;
        break;
      case GST_EVENT_STREAM_START:
        /* a new stream starts: reset the flow result */
        result = GST_FLOW_OK;
        if (G_UNLIKELY (*allow_drop))
          *allow_drop = FALSE;
        break;
      case GST_EVENT_SEGMENT:
        apply_segment (mq, sq, event, &sq->src_segment);
        /* Applying the segment may have made the queue non-full again, unblock it if needed */
        gst_data_queue_limits_changed (sq->queue);
        if (G_UNLIKELY (*allow_drop)) {
          result = GST_FLOW_OK;
          *allow_drop = FALSE;
        }
        break;
      case GST_EVENT_GAP:
        apply_gap (mq, sq, event, &sq->src_segment);
        /* Applying the gap may have made the queue non-full again, unblock it if needed */
        gst_data_queue_limits_changed (sq->queue);
        break;
      default:
        break;
    }

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS event %p of type %s",
          sq->id, event, GST_EVENT_TYPE_NAME (event));
      gst_event_unref (event);
    } else {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Pushing event %p of type %s",
          sq->id, event, GST_EVENT_TYPE_NAME (event));

      gst_pad_push_event (sq->srcpad, event);
    }
  } else if (GST_IS_QUERY (object)) {
    GstQuery *query;
    gboolean res;

    query = GST_QUERY_CAST (object);

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS query %p", sq->id, query);
      gst_query_unref (query);
      res = FALSE;
    } else {
      res = gst_pad_peer_query (sq->srcpad, query);
    }

    /* record the result and wake up the sinkpad thread blocked waiting
     * for this serialized query to be handled */
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    sq->last_query = res;
    sq->last_handled_query = query;
    g_cond_signal (&sq->query_handled);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  } else {
    g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
        sq->id);
  }
  return result;

  /* ERRORS */
}
1718
1719 static GstMiniObject *
gst_multi_queue_item_steal_object(GstMultiQueueItem * item)1720 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
1721 {
1722 GstMiniObject *res;
1723
1724 res = item->object;
1725 item->object = NULL;
1726
1727 return res;
1728 }
1729
/* GDestroyNotify for queue items.  Queries are not unreffed here: the
 * sinkpad thread that posted them still waits on (and owns) them. */
static void
gst_multi_queue_item_destroy (GstMultiQueueItem * item)
{
  if (!item->is_query && item->object)
    gst_mini_object_unref (item->object);
  g_slice_free (GstMultiQueueItem, item);
}
1737
1738 /* takes ownership of passed mini object! */
1739 static GstMultiQueueItem *
gst_multi_queue_buffer_item_new(GstMiniObject * object,guint32 curid)1740 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
1741 {
1742 GstMultiQueueItem *item;
1743
1744 item = g_slice_new (GstMultiQueueItem);
1745 item->object = object;
1746 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1747 item->posid = curid;
1748 item->is_query = GST_IS_QUERY (object);
1749
1750 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
1751 item->duration = GST_BUFFER_DURATION (object);
1752 if (item->duration == GST_CLOCK_TIME_NONE)
1753 item->duration = 0;
1754 item->visible = TRUE;
1755 return item;
1756 }
1757
1758 static GstMultiQueueItem *
gst_multi_queue_mo_item_new(GstMiniObject * object,guint32 curid)1759 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
1760 {
1761 GstMultiQueueItem *item;
1762
1763 item = g_slice_new (GstMultiQueueItem);
1764 item->object = object;
1765 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
1766 item->posid = curid;
1767 item->is_query = GST_IS_QUERY (object);
1768
1769 item->size = 0;
1770 item->duration = 0;
1771 item->visible = FALSE;
1772 return item;
1773 }
1774
1775 /* Each main loop attempts to push buffers until the return value
1776 * is not-linked. not-linked pads are not allowed to push data beyond
1777 * any linked pads, so they don't 'rush ahead of the pack'.
1778 */
/* Streaming task for one SingleQueue's source pad: pops one item from the
 * data queue, possibly waits (when not-linked) until it is this stream's
 * turn, then pushes the item downstream. Loops via 'next' while dropping
 * data after an EOS that must not yet be reported upstream. */
static void
gst_multi_queue_loop (GstPad * pad)
{
  GstSingleQueue *sq;
  GstMultiQueueItem *item;
  GstDataQueueItem *sitem;
  GstMultiQueue *mq;
  GstMiniObject *object = NULL;
  guint32 newid;
  GstFlowReturn result;
  GstClockTimeDiff next_time;
  gboolean is_buffer;
  gboolean do_update_buffering = FALSE;
  gboolean dropping = FALSE;

  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = sq->mqueue;

next:
  GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);

  if (sq->flushing)
    goto out_flushing;

  /* Get something from the queue, blocking until that happens, or we get
   * flushed */
  if (!(gst_data_queue_pop (sq->queue, &sitem)))
    goto out_flushing;

  item = (GstMultiQueueItem *) sitem;
  newid = item->posid;

  /* steal the object and destroy the item */
  object = gst_multi_queue_item_steal_object (item);
  gst_multi_queue_item_destroy (item);

  is_buffer = GST_IS_BUFFER (object);

  /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
  next_time = get_running_time (&sq->src_segment, object, FALSE);

  GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
      sq->id, newid, sq->last_oldid);

  /* If we're not-linked, we do some extra work because we might need to
   * wait before pushing. If we're linked but there's a gap in the IDs,
   * or it's the first loop, or we just passed the previous highid,
   * we might need to wake some sleeping pad up, so there's extra work
   * there too */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->srcresult == GST_FLOW_NOT_LINKED
      || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
      || sq->last_oldid > mq->highid) {
    GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
        gst_flow_get_name (sq->srcresult));

    /* Check again if we're flushing after the lock is taken,
     * the flush flag might have been changed in the meantime */
    if (sq->flushing) {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      goto out_flushing;
    }

    /* Update the nextid so other threads know when to wake us up */
    sq->nextid = newid;
    /* Take into account the extra cache time since we're unlinked */
    if (GST_CLOCK_STIME_IS_VALID (next_time))
      next_time += mq->unlinked_cache_time;
    sq->next_time = next_time;

    /* Update the oldid (the last ID we output) for highid tracking */
    if (sq->last_oldid != G_MAXUINT32)
      sq->oldid = sq->last_oldid;

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      gboolean should_wait;
      /* Go to sleep until it's time to push this buffer */

      /* Recompute the highid */
      compute_high_id (mq);
      /* Recompute the high time */
      compute_high_time (mq, sq->groupid);

      GST_DEBUG_OBJECT (mq,
          "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
          GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
          GST_STIME_ARGS (next_time));

      /* Running-time mode compares against the (group) high time; id mode
       * compares against the highid of the linked pads */
      if (mq->sync_by_running_time) {
        if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              (mq->high_time == GST_CLOCK_STIME_NONE
              || next_time > mq->high_time);
        } else {
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              next_time > sq->group_high_time;
        }
      } else
        should_wait = newid > mq->highid;

      while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {

        GST_DEBUG_OBJECT (mq,
            "queue %d sleeping for not-linked wakeup with "
            "newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));

        /* Wake up all non-linked pads before we sleep */
        wake_up_next_non_linked (mq);

        mq->numwaiting++;
        g_cond_wait (&sq->turn, &mq->qlock);
        mq->numwaiting--;

        if (sq->flushing) {
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          goto out_flushing;
        }

        /* Recompute the high time and ID */
        compute_high_time (mq, sq->groupid);
        compute_high_id (mq);

        GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
            "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
            sq->id, newid, mq->highid,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
            GST_STIME_ARGS (mq->high_time));

        /* Re-evaluate the wait condition with the refreshed high time/id */
        if (mq->sync_by_running_time) {
          if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                (mq->high_time == GST_CLOCK_STIME_NONE
                || next_time > mq->high_time);
          } else {
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                next_time > sq->group_high_time;
          }
        } else
          should_wait = newid > mq->highid;
      }

      /* Re-compute the high_id in case someone else pushed */
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
    } else {
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
      /* Wake up all non-linked pads */
      wake_up_next_non_linked (mq);
    }
    /* We're done waiting, we can clear the nextid and nexttime */
    sq->nextid = 0;
    sq->next_time = GST_CLOCK_STIME_NONE;
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  if (sq->flushing)
    goto out_flushing;

  GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
      gst_flow_get_name (sq->srcresult));

  /* Update time stats */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  next_time = get_running_time (&sq->src_segment, object, TRUE);
  if (GST_CLOCK_STIME_IS_VALID (next_time)) {
    if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
      sq->last_time = next_time;
    if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
      /* Wake up all non-linked pads now that we advanced the high time */
      mq->high_time = next_time;
      wake_up_next_non_linked (mq);
    }
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Try to push out the new object */
  result = gst_single_queue_push_one (mq, sq, object, &dropping);
  /* push_one took ownership of object in all cases */
  object = NULL;

  /* Check if we pushed something already and if this is
   * now a switch from an active to a non-active stream.
   *
   * If it is, we reset all the waiting streams, let them
   * push another buffer to see if they're now active again.
   * This allows faster switching between streams and prevents
   * deadlocks if downstream does any waiting too.
   */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->pushed && sq->srcresult == GST_FLOW_OK
      && result == GST_FLOW_NOT_LINKED) {
    GList *tmp;

    GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
        sq->id);

    compute_high_id (mq);
    compute_high_time (mq, sq->groupid);
    do_update_buffering = TRUE;

    /* maybe no-one is waiting */
    if (mq->numwaiting > 0) {
      /* Else figure out which singlequeue(s) need waking up */
      for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
        GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;

        if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
          GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
          sq2->pushed = FALSE;
          sq2->srcresult = GST_FLOW_OK;
          g_cond_signal (&sq2->turn);
        }
      }
    }
  }

  /* Only buffers count as "having pushed data" for the active/non-active
   * switching logic above */
  if (is_buffer)
    sq->pushed = TRUE;

  /* now hold on a bit;
   * can not simply throw this result to upstream, because
   * that might already be onto another segment, so we have to make
   * sure we are relaying the correct info wrt proper segment */
  if (result == GST_FLOW_EOS && !dropping &&
      sq->srcresult != GST_FLOW_NOT_LINKED) {
    GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
    dropping = TRUE;
    /* pretend we have not seen EOS yet for upstream's sake */
    result = sq->srcresult;
  } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
    /* queue empty, so stop dropping
     * we can commit the result we have now,
     * which is either OK after a segment, or EOS */
    GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
    dropping = FALSE;
    result = GST_FLOW_EOS;
  }
  sq->srcresult = result;
  sq->last_oldid = newid;

  if (do_update_buffering)
    update_buffering (mq, sq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  /* buffering messages must be posted without the multiqueue lock held */
  gst_multi_queue_post_buffering (mq);

  GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
      sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (sq->srcpad));

  /* Need to make sure wake up any sleeping pads when we exit */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (sq->srcpad)
          || sq->srcresult == GST_FLOW_EOS)) {
    compute_high_time (mq, sq->groupid);
    compute_high_id (mq);
    wake_up_next_non_linked (mq);
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* While dropping post-EOS data, keep draining the queue immediately */
  if (dropping)
    goto next;

  if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
      && result != GST_FLOW_EOS)
    goto out_flushing;

  return;

out_flushing:
  {
    /* Release the popped object if we still own it (queries are owned by
     * the thread that queued them) */
    if (object && !GST_IS_QUERY (object))
      gst_mini_object_unref (object);

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    sq->last_query = FALSE;
    g_cond_signal (&sq->query_handled);

    /* Post an error message if we got EOS while downstream
     * has returned an error flow return. After EOS there
     * will be no further buffer which could propagate the
     * error upstream */
    if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
    } else {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    }

    /* upstream needs to see fatal result ASAP to shut things down,
     * but might be stuck in one of our other full queues;
     * so empty this one and trigger dynamic queue growth. At
     * this point the srcresult is not OK, NOT_LINKED
     * or EOS, i.e. a real failure */
    gst_single_queue_flush_queue (sq, FALSE);
    single_queue_underrun_cb (sq->queue, sq);
    gst_data_queue_set_flushing (sq->queue, TRUE);
    gst_pad_pause_task (sq->srcpad);
    GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
        "SingleQueue[%d] task paused, reason:%s",
        sq->id, gst_flow_get_name (sq->srcresult));
    return;
  }
}
2085
/**
 * gst_multi_queue_chain:
 *
 * This is similar to GstQueue's chain function, except:
 * _ we don't have leak behaviours,
 * _ we push with a unique id (curid)
 *
 * Returns the current srcresult of the single queue, or GST_FLOW_EOS
 * when this stream already saw EOS (the buffer is dropped in that case).
 */
static GstFlowReturn
gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstSingleQueue *sq;
  GstMultiQueue *mq;
  GstMultiQueueItem *item;
  guint32 curid;
  GstClockTime timestamp, duration;

  sq = gst_pad_get_element_private (pad);
  mq = sq->mqueue;

  /* if eos, we are always full, so avoid hanging incoming indefinitely */
  if (sq->is_eos)
    goto was_eos;

  sq->active = TRUE;

  /* Get a unique incrementing id */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  /* DTS is preferred over PTS for ordering/interleave tracking */
  timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  GST_LOG_OBJECT (mq,
      "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
      GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
      sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));

  /* the item takes ownership of the buffer from here on */
  item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);

  /* Update interleave before pushing data into queue */
  if (mq->use_interleave) {
    GstClockTime val = timestamp;
    GstClockTimeDiff dval;

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    /* fall back to the segment position when the buffer has no timestamp */
    if (val == GST_CLOCK_TIME_NONE)
      val = sq->sink_segment.position;
    if (duration != GST_CLOCK_TIME_NONE)
      val += duration;

    dval = my_segment_to_running_time (&sq->sink_segment, val);
    if (GST_CLOCK_STIME_IS_VALID (dval)) {
      sq->cached_sinktime = dval;
      GST_DEBUG_OBJECT (mq,
          "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
          GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
          GST_STIME_ARGS (sq->cached_sinktime));
      calculate_interleave (mq, sq);
    }
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }

  /* may block until there is room, or fail when the queue is flushing */
  if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
    goto flushing;

  /* update time level, we must do this after pushing the data in the queue so
   * that we never end up filling the queue first. */
  apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);

done:
  return sq->srcresult;

  /* ERRORS */
flushing:
  {
    GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
        sq->id, gst_flow_get_name (sq->srcresult));
    /* destroying the item also releases the buffer it owns */
    gst_multi_queue_item_destroy (item);
    goto done;
  }
was_eos:
  {
    GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
    gst_buffer_unref (buffer);
    return GST_FLOW_EOS;
  }
}
2173
/* Activate/deactivate a sink pad in push mode. Activation resets the flow
 * state; deactivation sets the queue flushing and waits for the streaming
 * thread to finish before flushing the data queue. */
static gboolean
gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
    GstPadMode mode, gboolean active)
{
  gboolean res;
  GstSingleQueue *sq;
  GstMultiQueue *mq;

  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) gst_pad_get_parent (pad);

  /* mq is NULL if the pad is activated/deactivated before being
   * added to the multiqueue */
  if (mq)
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  switch (mode) {
    case GST_PAD_MODE_PUSH:
      if (active) {
        /* All pads start off linked until they push one buffer */
        sq->srcresult = GST_FLOW_OK;
        sq->pushed = FALSE;
        gst_data_queue_set_flushing (sq->queue, FALSE);
      } else {
        sq->srcresult = GST_FLOW_FLUSHING;
        sq->last_query = FALSE;
        /* unblock any thread waiting for a serialized query result */
        g_cond_signal (&sq->query_handled);
        gst_data_queue_set_flushing (sq->queue, TRUE);

        /* Wait until streaming thread has finished */
        /* NOTE: the multiqueue mutex is released before taking the stream
         * lock and re-taken afterwards, keeping a consistent lock order
         * with the streaming thread */
        if (mq)
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_LOCK (pad);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        gst_data_queue_flush (sq->queue);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_UNLOCK (pad);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      }
      res = TRUE;
      break;
    default:
      /* only push mode is supported */
      res = FALSE;
      break;
  }

  if (mq) {
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    /* balance the ref taken by gst_pad_get_parent() */
    gst_object_unref (mq);
  }

  return res;
}
2230
/* Sink pad event handler. Flush events and non-serialized events are
 * forwarded directly to the source pad; serialized events are enqueued
 * with a unique id, like buffers, so they stay ordered with the data. */
static GstFlowReturn
gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
  GstSingleQueue *sq;
  GstMultiQueue *mq;
  guint32 curid;
  GstMultiQueueItem *item;
  gboolean res = TRUE;
  GstFlowReturn flowret = GST_FLOW_OK;
  GstEventType type;
  GstEvent *sref = NULL;

  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) parent;

  type = GST_EVENT_TYPE (event);

  switch (type) {
    case GST_EVENT_STREAM_START:
    {
      if (mq->sync_by_running_time) {
        GstStreamFlags stream_flags;
        gst_event_parse_stream_flags (event, &stream_flags);
        if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
          GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
          sq->is_sparse = TRUE;
        }
      }

      /* remember which thread feeds this queue */
      sq->thread = g_thread_self ();

      /* Remove EOS flag */
      sq->is_eos = FALSE;
      break;
    }
    case GST_EVENT_FLUSH_START:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
          sq->id);

      res = gst_pad_push_event (sq->srcpad, event);

      gst_single_queue_flush (mq, sq, TRUE, FALSE);
      gst_single_queue_pause (mq, sq);
      goto done;

    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
          sq->id);

      res = gst_pad_push_event (sq->srcpad, event);

      gst_single_queue_flush (mq, sq, FALSE, FALSE);
      gst_single_queue_start (mq, sq);
      goto done;

    case GST_EVENT_SEGMENT:
      sq->is_segment_done = FALSE;
      /* keep a ref so the segment can be applied after queueing */
      sref = gst_event_ref (event);
      break;
    case GST_EVENT_GAP:
      /* take ref because the queue will take ownership and we need the event
       * afterwards to update the segment */
      sref = gst_event_ref (event);
      if (mq->use_interleave) {
        GstClockTime val, dur;
        GstClockTime stime;
        gst_event_parse_gap (event, &val, &dur);
        if (GST_CLOCK_TIME_IS_VALID (val)) {
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (GST_CLOCK_TIME_IS_VALID (dur))
            val += dur;
          stime = my_segment_to_running_time (&sq->sink_segment, val);
          if (GST_CLOCK_STIME_IS_VALID (stime)) {
            sq->cached_sinktime = stime;
            calculate_interleave (mq, sq);
          }
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        }
      }
      break;

    default:
      if (!(GST_EVENT_IS_SERIALIZED (event))) {
        /* non-serialized events bypass the queue entirely */
        res = gst_pad_push_event (sq->srcpad, event);
        goto done;
      }
      break;
  }

  /* if eos, we are always full, so avoid hanging incoming indefinitely */
  if (sq->is_eos)
    goto was_eos;

  /* Get an unique incrementing id. */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  /* the item takes ownership of the event */
  item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);

  GST_DEBUG_OBJECT (mq,
      "SingleQueue %d : Enqueuing event %p of type %s with id %d",
      sq->id, event, GST_EVENT_TYPE_NAME (event), curid);

  if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
    goto flushing;

  /* mark EOS when we received one, we must do that after putting the
   * buffer in the queue because EOS marks the buffer as filled. */
  switch (type) {
    case GST_EVENT_SEGMENT_DONE:
      sq->is_segment_done = TRUE;
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);
      break;
    case GST_EVENT_EOS:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      sq->is_eos = TRUE;

      /* Post an error message if we got EOS while downstream
       * has returned an error flow return. After EOS there
       * will be no further buffer which could propagate the
       * error upstream */
      if (sq->srcresult < GST_FLOW_EOS) {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
      } else {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      }

      /* EOS affects the buffering state */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);
      break;
    case GST_EVENT_SEGMENT:
      apply_segment (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* a new segment allows us to accept more buffers if we got EOS
       * from downstream */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      if (sq->srcresult == GST_FLOW_EOS)
        sq->srcresult = GST_FLOW_OK;
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      break;
    case GST_EVENT_GAP:
      sq->active = TRUE;
      apply_gap (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* FALLTHROUGH */
    default:
      break;
  }

done:
  if (res == FALSE)
    flowret = GST_FLOW_ERROR;
  GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
      gst_flow_get_name (flowret));
  return flowret;

flushing:
  {
    GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
        sq->id, gst_flow_get_name (sq->srcresult));
    if (sref)
      gst_event_unref (sref);
    /* destroying the item also releases the event it owns */
    gst_multi_queue_item_destroy (item);
    return sq->srcresult;
  }
was_eos:
  {
    GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
    gst_event_unref (event);
    return GST_FLOW_EOS;
  }
}
2410
/* Sink pad query handler. Serialized queries are enqueued like buffers and
 * the caller blocks until the streaming thread has handled them; others go
 * through the default handler. */
static gboolean
gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  gboolean res;
  GstSingleQueue *sq;
  GstMultiQueue *mq;

  sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
  mq = (GstMultiQueue *) parent;

  switch (GST_QUERY_TYPE (query)) {
    default:
      if (GST_QUERY_IS_SERIALIZED (query)) {
        guint32 curid;
        GstMultiQueueItem *item;

        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        if (sq->srcresult != GST_FLOW_OK)
          goto out_flushing;

        /* serialized events go in the queue. We need to be certain that we
         * don't cause deadlocks waiting for the query return value. We check if
         * the queue is empty (nothing is blocking downstream and the query can
         * be pushed for sure) or we are not buffering. If we are buffering,
         * the pipeline waits to unblock downstream until our queue fills up
         * completely, which can not happen if we block on the query..
         * Therefore we only potentially block when we are not buffering. */
        if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
          /* Get an unique incrementing id. */
          curid = g_atomic_int_add ((gint *) & mq->counter, 1);

          item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);

          GST_DEBUG_OBJECT (mq,
              "SingleQueue %d : Enqueuing query %p of type %s with id %d",
              sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
          /* drop the lock while pushing: the push may block on a full queue */
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (!res || sq->flushing)
            goto out_flushing;
          /* it might be that the query has been taken out of the queue
           * while we were unlocked. So, we need to check if the last
           * handled query is the same one than the one we just
           * pushed. If it is, we don't need to wait for the condition
           * variable, otherwise we wait for the condition variable to
           * be signaled. */
          while (!sq->flushing && sq->srcresult == GST_FLOW_OK
              && sq->last_handled_query != query)
            g_cond_wait (&sq->query_handled, &mq->qlock);
          res = sq->last_query;
          sq->last_handled_query = NULL;
        } else {
          GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
              "queue is not empty");
          res = FALSE;
        }
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      } else {
        /* default handling */
        res = gst_pad_query_default (pad, parent, query);
      }
      break;
  }
  return res;

out_flushing:
  {
    GST_DEBUG_OBJECT (mq, "Flushing");
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    return FALSE;
  }
}
2484
2485 static gboolean
gst_multi_queue_src_activate_mode(GstPad * pad,GstObject * parent,GstPadMode mode,gboolean active)2486 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2487 GstPadMode mode, gboolean active)
2488 {
2489 GstMultiQueue *mq;
2490 GstSingleQueue *sq;
2491 gboolean result;
2492
2493 sq = (GstSingleQueue *) gst_pad_get_element_private (pad);
2494 mq = sq->mqueue;
2495
2496 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2497
2498 switch (mode) {
2499 case GST_PAD_MODE_PUSH:
2500 if (active) {
2501 gst_single_queue_flush (mq, sq, FALSE, TRUE);
2502 result = parent ? gst_single_queue_start (mq, sq) : TRUE;
2503 } else {
2504 gst_single_queue_flush (mq, sq, TRUE, TRUE);
2505 result = gst_single_queue_stop (mq, sq);
2506 }
2507 break;
2508 default:
2509 result = FALSE;
2510 break;
2511 }
2512 return result;
2513 }
2514
2515 static gboolean
gst_multi_queue_src_event(GstPad * pad,GstObject * parent,GstEvent * event)2516 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2517 {
2518 GstSingleQueue *sq = gst_pad_get_element_private (pad);
2519 GstMultiQueue *mq = sq->mqueue;
2520 gboolean ret;
2521
2522 switch (GST_EVENT_TYPE (event)) {
2523 case GST_EVENT_RECONFIGURE:
2524 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2525 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2526 sq->srcresult = GST_FLOW_OK;
2527 g_cond_signal (&sq->turn);
2528 }
2529 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2530
2531 ret = gst_pad_push_event (sq->sinkpad, event);
2532 break;
2533 default:
2534 ret = gst_pad_push_event (sq->sinkpad, event);
2535 break;
2536 }
2537
2538 return ret;
2539 }
2540
2541 static gboolean
gst_multi_queue_src_query(GstPad * pad,GstObject * parent,GstQuery * query)2542 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2543 {
2544 gboolean res;
2545
2546 /* FIXME, Handle position offset depending on queue size */
2547 switch (GST_QUERY_TYPE (query)) {
2548 default:
2549 /* default handling */
2550 res = gst_pad_query_default (pad, parent, query);
2551 break;
2552 }
2553 return res;
2554 }
2555
2556 /*
2557 * Next-non-linked functions
2558 */
2559
2560 /* WITH LOCK TAKEN */
2561 static void
wake_up_next_non_linked(GstMultiQueue * mq)2562 wake_up_next_non_linked (GstMultiQueue * mq)
2563 {
2564 GList *tmp;
2565
2566 /* maybe no-one is waiting */
2567 if (mq->numwaiting < 1)
2568 return;
2569
2570 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
2571 /* Else figure out which singlequeue(s) need waking up */
2572 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2573 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2574 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2575 GstClockTimeDiff high_time;
2576
2577 if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
2578 high_time = sq->group_high_time;
2579 else
2580 high_time = mq->high_time;
2581
2582 if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
2583 GST_CLOCK_STIME_IS_VALID (high_time)
2584 && sq->next_time <= high_time) {
2585 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2586 g_cond_signal (&sq->turn);
2587 }
2588 }
2589 }
2590 } else {
2591 /* Else figure out which singlequeue(s) need waking up */
2592 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2593 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2594 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
2595 sq->nextid != 0 && sq->nextid <= mq->highid) {
2596 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
2597 g_cond_signal (&sq->turn);
2598 }
2599 }
2600 }
2601 }
2602
2603 /* WITH LOCK TAKEN */
2604 static void
compute_high_id(GstMultiQueue * mq)2605 compute_high_id (GstMultiQueue * mq)
2606 {
2607 /* The high-id is either the highest id among the linked pads, or if all
2608 * pads are not-linked, it's the lowest not-linked pad */
2609 GList *tmp;
2610 guint32 lowest = G_MAXUINT32;
2611 guint32 highid = G_MAXUINT32;
2612
2613 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2614 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2615
2616 GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
2617 sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
2618
2619 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2620 /* No need to consider queues which are not waiting */
2621 if (sq->nextid == 0) {
2622 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
2623 continue;
2624 }
2625
2626 if (sq->nextid < lowest)
2627 lowest = sq->nextid;
2628 } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
2629 /* If we don't have a global highid, or the global highid is lower than
2630 * this single queue's last outputted id, store the queue's one,
2631 * unless the singlequeue output is at EOS */
2632 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
2633 highid = sq->oldid;
2634 }
2635 }
2636
2637 if (highid == G_MAXUINT32 || lowest < highid)
2638 mq->highid = lowest;
2639 else
2640 mq->highid = highid;
2641
2642 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
2643 lowest);
2644 }
2645
2646 /* WITH LOCK TAKEN */
2647 static void
compute_high_time(GstMultiQueue * mq,guint groupid)2648 compute_high_time (GstMultiQueue * mq, guint groupid)
2649 {
2650 /* The high-time is either the highest last time among the linked
2651 * pads, or if all pads are not-linked, it's the lowest nex time of
2652 * not-linked pad */
2653 GList *tmp;
2654 GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
2655 GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
2656 GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
2657 GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
2658 GstClockTimeDiff res;
2659 /* Number of streams which belong to groupid */
2660 guint group_count = 0;
2661
2662 if (!mq->sync_by_running_time)
2663 /* return GST_CLOCK_STIME_NONE; */
2664 return;
2665
2666 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2667 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2668
2669 GST_LOG_OBJECT (mq,
2670 "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
2671 ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
2672 GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
2673 gst_flow_get_name (sq->srcresult));
2674
2675 if (sq->groupid == groupid)
2676 group_count++;
2677
2678 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2679 /* No need to consider queues which are not waiting */
2680 if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
2681 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
2682 continue;
2683 }
2684
2685 if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
2686 lowest = sq->next_time;
2687 if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
2688 || sq->next_time < group_low))
2689 group_low = sq->next_time;
2690 } else if (!GST_PAD_IS_EOS (sq->srcpad) && sq->srcresult != GST_FLOW_EOS) {
2691 /* If we don't have a global high time, or the global high time
2692 * is lower than this single queue's last outputted time, store
2693 * the queue's one, unless the singlequeue output is at EOS. */
2694 if (highest == GST_CLOCK_STIME_NONE
2695 || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
2696 highest = sq->last_time;
2697 if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
2698 || (sq->last_time != GST_CLOCK_STIME_NONE
2699 && sq->last_time > group_high)))
2700 group_high = sq->last_time;
2701 }
2702 GST_LOG_OBJECT (mq,
2703 "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
2704 GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
2705 if (sq->groupid == groupid)
2706 GST_LOG_OBJECT (mq,
2707 "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
2708 GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));
2709 }
2710
2711 if (highest == GST_CLOCK_STIME_NONE)
2712 mq->high_time = lowest;
2713 else
2714 mq->high_time = highest;
2715
2716 /* If there's only one stream of a given type, use the global high */
2717 if (group_count < 2)
2718 res = GST_CLOCK_STIME_NONE;
2719 else if (group_high == GST_CLOCK_STIME_NONE)
2720 res = group_low;
2721 else
2722 res = group_high;
2723
2724 GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
2725 GST_LOG_OBJECT (mq,
2726 "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
2727 GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
2728 GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (mq->high_time),
2729 GST_STIME_ARGS (lowest));
2730
2731 for (tmp = mq->queues; tmp; tmp = tmp->next) {
2732 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
2733 if (groupid == sq->groupid)
2734 sq->group_high_time = res;
2735 }
2736 }
2737
/* TRUE when the @format limit of queue @q is enabled (non-zero) and
 * @value has reached or exceeded it */
#define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
    ((q)->max_size.format) <= (value))
2740
2741 /*
2742 * GstSingleQueue functions
2743 */
/* Called by the GstDataQueue when a singlequeue becomes full. If the hard
 * limits are not reached and some other (non-sparse) queue is empty, the
 * visible-items limit is bumped by one so the full queue can grow instead
 * of starving the empty one; otherwise the overrun signal is emitted. */
static void
single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
{
  GstMultiQueue *mq = sq->mqueue;
  GList *tmp;
  GstDataQueueSize size;
  gboolean filled = TRUE;
  gboolean empty_found = FALSE;

  gst_data_queue_get_level (sq->queue, &size);

  GST_LOG_OBJECT (mq,
      "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
      G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
      sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
      sq->max_size.time);

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  /* check if we reached the hard time/bytes limits;
     time limit is only taken into account for non-sparse streams */
  if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
      (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
    goto done;
  }

  /* Search for empty queues */
  for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;

    if (oq == sq)
      continue;

    /* not-linked queues don't starve, no need to grow for them */
    if (oq->srcresult == GST_FLOW_NOT_LINKED) {
      GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
      continue;
    }

    GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
    if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
      GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
      empty_found = TRUE;
      break;
    }
  }

  /* if hard limits are not reached then we allow one more buffer in the full
   * queue, but only if any of the other singelqueues are empty */
  if (empty_found) {
    if (IS_FILLED (sq, visible, size.visible)) {
      sq->max_size.visible = size.visible + 1;
      GST_DEBUG_OBJECT (mq,
          "Bumping single queue %d max visible to %d",
          sq->id, sq->max_size.visible);
      filled = FALSE;
    }
  }

done:
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Overrun is always forwarded, since this is blocking the upstream element */
  if (filled) {
    GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
    g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
  }
}
2811
2812 static void
single_queue_underrun_cb(GstDataQueue * dq,GstSingleQueue * sq)2813 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
2814 {
2815 gboolean empty = TRUE;
2816 GstMultiQueue *mq = sq->mqueue;
2817 GList *tmp;
2818
2819 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2820 GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
2821 return;
2822 } else {
2823 GST_LOG_OBJECT (mq,
2824 "Single Queue %d is empty, Checking other single queues", sq->id);
2825 }
2826
2827 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2828 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
2829 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
2830
2831 if (gst_data_queue_is_full (oq->queue)) {
2832 GstDataQueueSize size;
2833
2834 gst_data_queue_get_level (oq->queue, &size);
2835 if (IS_FILLED (oq, visible, size.visible)) {
2836 oq->max_size.visible = size.visible + 1;
2837 GST_DEBUG_OBJECT (mq,
2838 "queue %d is filled, bumping its max visible to %d", oq->id,
2839 oq->max_size.visible);
2840 gst_data_queue_limits_changed (oq->queue);
2841 }
2842 }
2843 if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
2844 empty = FALSE;
2845 }
2846 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2847
2848 if (empty) {
2849 GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
2850 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
2851 }
2852 }
2853
2854 static gboolean
single_queue_check_full(GstDataQueue * dataq,guint visible,guint bytes,guint64 time,GstSingleQueue * sq)2855 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
2856 guint64 time, GstSingleQueue * sq)
2857 {
2858 gboolean res;
2859 GstMultiQueue *mq = sq->mqueue;
2860
2861 GST_DEBUG_OBJECT (mq,
2862 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
2863 G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
2864 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
2865
2866 /* we are always filled on EOS */
2867 if (sq->is_eos || sq->is_segment_done)
2868 return TRUE;
2869
2870 /* we never go past the max visible items unless we are in buffering mode */
2871 if (!mq->use_buffering && IS_FILLED (sq, visible, visible))
2872 return TRUE;
2873
2874 /* check time or bytes */
2875 res = IS_FILLED (sq, bytes, bytes);
2876 /* We only care about limits in time if we're not a sparse stream or
2877 * we're not syncing by running time */
2878 if (!sq->is_sparse || !mq->sync_by_running_time) {
2879 /* If unlinked, take into account the extra unlinked cache time */
2880 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
2881 if (sq->cur_time > mq->unlinked_cache_time)
2882 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
2883 else
2884 res = FALSE;
2885 } else
2886 res |= IS_FILLED (sq, time, sq->cur_time);
2887 }
2888
2889 return res;
2890 }
2891
/* Drain and destroy every item queued on @sq.
 *
 * When @full is FALSE, sticky events other than SEGMENT and EOS are
 * re-stored on the srcpad before being destroyed, so they are not lost
 * by the flush. Takes the multiqueue lock to update buffering state;
 * must therefore be called without it held.
 */
static void
gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
{
  GstDataQueueItem *sitem;
  GstMultiQueueItem *mitem;
  gboolean was_flushing = FALSE;

  while (!gst_data_queue_is_empty (sq->queue)) {
    GstMiniObject *data;

    /* FIXME: If this fails here although the queue is not empty,
     * we're flushing... but we want to rescue all sticky
     * events nonetheless.
     */
    if (!gst_data_queue_pop (sq->queue, &sitem)) {
      /* pop failed while non-empty => the data queue is flushing; clear
       * the flushing flag so we can keep draining, and remember to
       * restore it once we're done */
      was_flushing = TRUE;
      gst_data_queue_set_flushing (sq->queue, FALSE);
      continue;
    }

    mitem = (GstMultiQueueItem *) sitem;

    data = sitem->object;

    /* rescue sticky events (except SEGMENT/EOS) on a partial flush */
    if (!full && !mitem->is_query && GST_IS_EVENT (data)
        && GST_EVENT_IS_STICKY (data)
        && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
        && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
      gst_pad_store_sticky_event (sq->srcpad, GST_EVENT_CAST (data));
    }

    sitem->destroy (sitem);
  }

  gst_data_queue_flush (sq->queue);
  if (was_flushing)
    gst_data_queue_set_flushing (sq->queue, TRUE);

  /* the fill level changed; recompute and post the buffering state */
  GST_MULTI_QUEUE_MUTEX_LOCK (sq->mqueue);
  update_buffering (sq->mqueue, sq);
  GST_MULTI_QUEUE_MUTEX_UNLOCK (sq->mqueue);
  gst_multi_queue_post_buffering (sq->mqueue);
}
2935
2936 static void
gst_single_queue_free(GstSingleQueue * sq)2937 gst_single_queue_free (GstSingleQueue * sq)
2938 {
2939 /* DRAIN QUEUE */
2940 gst_data_queue_flush (sq->queue);
2941 g_object_unref (sq->queue);
2942 g_cond_clear (&sq->turn);
2943 g_cond_clear (&sq->query_handled);
2944 g_free (sq);
2945 }
2946
/* Create a new single queue for @mqueue, along with its sink/src pad pair.
 *
 * @id selects the queue ID to use; pass (guint) -1 to auto-select the
 * lowest free ID. Returns NULL if a specifically requested ID is already
 * taken. The new pads are activated and the queue started when the element
 * is not in the NULL state.
 */
static GstSingleQueue *
gst_single_queue_new (GstMultiQueue * mqueue, guint id)
{
  GstSingleQueue *sq;
  GstMultiQueuePad *mqpad;
  GstPadTemplate *templ;
  gchar *name;
  GList *tmp;
  /* NOTE(review): id is unsigned, so "-1" here relies on wrap-around to
   * G_MAXUINT as the "auto-select" sentinel — intentional but subtle */
  guint temp_id = (id == -1) ? 0 : id;

  GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);

  /* Find an unused queue ID, if possible the passed one */
  for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
    /* This works because the IDs are sorted in ascending order */
    if (sq2->id == temp_id) {
      /* If this ID was requested by the caller return NULL,
       * otherwise just get us the next one */
      if (id == -1) {
        temp_id = sq2->id + 1;
      } else {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
        return NULL;
      }
    } else if (sq2->id > temp_id) {
      break;
    }
  }

  sq = g_new0 (GstSingleQueue, 1);
  mqueue->nbqueues++;
  sq->id = temp_id;
  sq->groupid = DEFAULT_PAD_GROUP_ID;
  sq->group_high_time = GST_CLOCK_STIME_NONE;

  /* insert before @tmp keeps the list sorted by ascending ID */
  mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
  mqueue->queues_cookie++;

  /* copy over max_size and extra_size so we don't need to take the lock
   * any longer when checking if the queue is full. */
  sq->max_size.visible = mqueue->max_size.visible;
  sq->max_size.bytes = mqueue->max_size.bytes;
  sq->max_size.time = mqueue->max_size.time;

  sq->extra_size.visible = mqueue->extra_size.visible;
  sq->extra_size.bytes = mqueue->extra_size.bytes;
  sq->extra_size.time = mqueue->extra_size.time;

  GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);

  sq->mqueue = mqueue;
  sq->srcresult = GST_FLOW_FLUSHING;
  sq->pushed = FALSE;
  /* the data queue drives this single queue via the three callbacks:
   * full check, overrun and underrun */
  sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
      single_queue_check_full,
      (GstDataQueueFullCallback) single_queue_overrun_cb,
      (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
  sq->is_eos = FALSE;
  sq->is_sparse = FALSE;
  sq->flushing = FALSE;
  sq->active = FALSE;
  gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
  gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);

  sq->nextid = 0;
  sq->oldid = 0;
  sq->next_time = GST_CLOCK_STIME_NONE;
  sq->last_time = GST_CLOCK_STIME_NONE;
  g_cond_init (&sq->turn);
  g_cond_init (&sq->query_handled);

  sq->sinktime = GST_CLOCK_STIME_NONE;
  sq->srctime = GST_CLOCK_STIME_NONE;
  sq->sink_tainted = TRUE;
  sq->src_tainted = TRUE;

  /* sink pad is a GstMultiQueuePad subclass, created from the template */
  name = g_strdup_printf ("sink_%u", sq->id);
  templ = gst_static_pad_template_get (&sinktemplate);
  sq->sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
      "direction", templ->direction, "template", templ, NULL);
  gst_object_unref (templ);
  g_free (name);

  mqpad = (GstMultiQueuePad *) sq->sinkpad;
  mqpad->sq = sq;

  gst_pad_set_chain_function (sq->sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
  gst_pad_set_activatemode_function (sq->sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
  gst_pad_set_event_full_function (sq->sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
  gst_pad_set_query_function (sq->sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
  gst_pad_set_iterate_internal_links_function (sq->sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
  GST_OBJECT_FLAG_SET (sq->sinkpad, GST_PAD_FLAG_PROXY_CAPS);

  name = g_strdup_printf ("src_%u", sq->id);
  sq->srcpad = gst_pad_new_from_static_template (&srctemplate, name);
  g_free (name);

  gst_pad_set_activatemode_function (sq->srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
  gst_pad_set_event_function (sq->srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
  gst_pad_set_query_function (sq->srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
  gst_pad_set_iterate_internal_links_function (sq->srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
  GST_OBJECT_FLAG_SET (sq->srcpad, GST_PAD_FLAG_PROXY_CAPS);

  gst_pad_set_element_private (sq->sinkpad, (gpointer) sq);
  gst_pad_set_element_private (sq->srcpad, (gpointer) sq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);

  /* only activate the pads when we are not in the NULL state
   * and add the pad under the state_lock to prevend state changes
   * between activating and adding */
  g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
  if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
    gst_pad_set_active (sq->srcpad, TRUE);
    gst_pad_set_active (sq->sinkpad, TRUE);
  }
  gst_element_add_pad (GST_ELEMENT (mqueue), sq->srcpad);
  gst_element_add_pad (GST_ELEMENT (mqueue), sq->sinkpad);
  if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
    gst_single_queue_start (mqueue, sq);
  }
  g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));

  GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",
      sq->id);

  return sq;
}
3085