1 /* GStreamer
2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
17 * Boston, MA 02110-1301, USA.
18 */
19
20 /**
21 * SECTION:element-audiorate
22 * @title: audiorate
23 * @see_also: #GstVideoRate
24 *
25 * This element takes an incoming stream of timestamped raw audio frames and
26 * produces a perfect stream by inserting or dropping samples as needed.
27 *
28 * This operation may be of use to link to elements that require or otherwise
29 * implicitly assume a perfect stream as they do not store timestamps,
30 * but derive this by some means (e.g. bitrate for some AVI cases).
31 *
32 * The properties #GstAudioRate:in, #GstAudioRate:out, #GstAudioRate:add
33 * and #GstAudioRate:drop can be read to obtain information about number of
34 * input samples, output samples, dropped samples (i.e. the number of unused
35 * input samples) and inserted samples (i.e. the number of samples added to
36 * stream).
37 *
38 * When the #GstAudioRate:silent property is set to FALSE, a GObject property
39 * notification will be emitted whenever one of the #GstAudioRate:add or
40 * #GstAudioRate:drop values changes.
41 * This can potentially cause performance degradation.
42 * Note that property notification will happen from the streaming thread, so
43 * applications should be prepared for this.
44 *
45 * If the #GstAudioRate:tolerance property is non-zero, and an incoming buffer's
46 * timestamp deviates less than the property indicates from what would make a
47 * 'perfect time', then no samples will be added or dropped.
48 * Note that the output is still guaranteed to be a perfect stream, which means
49 * that the incoming data is then simply shifted (by less than the indicated
50 * tolerance) to a perfect time.
51 *
52 * ## Example pipelines
53 * |[
54 * gst-launch-1.0 -v autoaudiosrc ! audiorate ! audioconvert ! wavenc ! filesink location=alsa.wav
55 * ]|
56 * Capture audio from the sound card and turn it into a perfect stream
57 * for saving in a raw audio file.
58 * |[
59 * gst-launch-1.0 -v uridecodebin uri=file:///path/to/audio.file ! audiorate ! audioconvert ! wavenc ! filesink location=alsa.wav
60 * ]|
61 * Decodes an audio file and transforms it into a perfect stream for saving
62 * in a raw audio WAV file. Without the audio rate, the timing might not be
63 * preserved correctly in the WAV file in case the decoded stream is jittery
64 * or there are samples missing.
65 *
66 */
67
68 #ifdef HAVE_CONFIG_H
69 #include "config.h"
70 #endif
71
72 #include <string.h>
73 #include <stdlib.h>
74
75 #include "gstaudiorate.h"
76
/* Debug category used by all GST_*_OBJECT logging in this file */
#define GST_CAT_DEFAULT audio_rate_debug
GST_DEBUG_CATEGORY_STATIC (audio_rate_debug);

/* GstAudioRate signals and args */
enum
{
  /* FILL ME */
  LAST_SIGNAL
};

/* Default property values */
#define DEFAULT_SILENT TRUE
#define DEFAULT_TOLERANCE (40 * GST_MSECOND)
#define DEFAULT_SKIP_TO_FIRST FALSE

/* Property identifiers (PROP_0 is the GObject placeholder) */
enum
{
  PROP_0,
  PROP_IN,
  PROP_OUT,
  PROP_ADD,
  PROP_DROP,
  PROP_SILENT,
  PROP_TOLERANCE,
  PROP_SKIP_TO_FIRST
};

/* Both pads accept any raw audio format, interleaved or non-interleaved */
static GstStaticPadTemplate gst_audio_rate_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_AUDIO_CAPS_MAKE (GST_AUDIO_FORMATS_ALL)
        ", layout = (string) { interleaved, non-interleaved }")
    );

static GstStaticPadTemplate gst_audio_rate_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_AUDIO_CAPS_MAKE (GST_AUDIO_FORMATS_ALL)
        ", layout = (string) { interleaved, non-interleaved }")
    );

static gboolean gst_audio_rate_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_audio_rate_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static GstFlowReturn gst_audio_rate_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);

static void gst_audio_rate_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_audio_rate_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec);

static GstStateChangeReturn gst_audio_rate_change_state (GstElement * element,
    GstStateChange transition);

/*static guint gst_audio_rate_signals[LAST_SIGNAL] = { 0 }; */

/* Cached pspecs for fast g_object_notify_by_pspec() from the streaming
 * thread (avoids a name lookup per notification) */
static GParamSpec *pspec_drop = NULL;
static GParamSpec *pspec_add = NULL;

#define gst_audio_rate_parent_class parent_class
G_DEFINE_TYPE (GstAudioRate, gst_audio_rate, GST_TYPE_ELEMENT);
141
/* Class initializer: installs properties, element metadata, pad templates
 * and the state-change vfunc. Runs once per process. */
static void
gst_audio_rate_class_init (GstAudioRateClass * klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);

  object_class->set_property = gst_audio_rate_set_property;
  object_class->get_property = gst_audio_rate_get_property;

  /* read-only counters exposing the element's running statistics */
  g_object_class_install_property (object_class, PROP_IN,
      g_param_spec_uint64 ("in", "In",
          "Number of input samples", 0, G_MAXUINT64, 0,
          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (object_class, PROP_OUT,
      g_param_spec_uint64 ("out", "Out", "Number of output samples", 0,
          G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
  /* keep the pspecs of "add"/"drop" around so the streaming thread can
   * notify by pspec without a property-name lookup */
  pspec_add = g_param_spec_uint64 ("add", "Add", "Number of added samples",
      0, G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
  g_object_class_install_property (object_class, PROP_ADD, pspec_add);
  pspec_drop = g_param_spec_uint64 ("drop", "Drop", "Number of dropped samples",
      0, G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
  g_object_class_install_property (object_class, PROP_DROP, pspec_drop);
  g_object_class_install_property (object_class, PROP_SILENT,
      g_param_spec_boolean ("silent", "silent",
          "Don't emit notify for dropped and duplicated frames", DEFAULT_SILENT,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstAudioRate:tolerance:
   *
   * The difference between incoming timestamp and next timestamp must exceed
   * the given value for audiorate to add or drop samples.
   */
  g_object_class_install_property (object_class, PROP_TOLERANCE,
      g_param_spec_uint64 ("tolerance", "tolerance",
          "Only act if timestamp jitter/imperfection exceeds indicated tolerance (ns)",
          0, G_MAXUINT64, DEFAULT_TOLERANCE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAudioRate:skip-to-first:
   *
   * Don't produce buffers before the first one we receive.
   */
  g_object_class_install_property (object_class, PROP_SKIP_TO_FIRST,
      g_param_spec_boolean ("skip-to-first", "Skip to first buffer",
          "Don't produce buffers before the first one we receive",
          DEFAULT_SKIP_TO_FIRST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  gst_element_class_set_static_metadata (element_class,
      "Audio rate adjuster", "Filter/Effect/Audio",
      "Drops/duplicates/adjusts timestamps on audio samples to make a perfect stream",
      "Wim Taymans <wim@fluendo.com>");

  gst_element_class_add_static_pad_template (element_class,
      &gst_audio_rate_sink_template);
  gst_element_class_add_static_pad_template (element_class,
      &gst_audio_rate_src_template);

  element_class->change_state = gst_audio_rate_change_state;
}
202
203 static void
gst_audio_rate_reset(GstAudioRate * audiorate)204 gst_audio_rate_reset (GstAudioRate * audiorate)
205 {
206 audiorate->next_offset = -1;
207 audiorate->next_ts = -1;
208 audiorate->discont = TRUE;
209 gst_segment_init (&audiorate->sink_segment, GST_FORMAT_UNDEFINED);
210 gst_segment_init (&audiorate->src_segment, GST_FORMAT_TIME);
211
212 GST_DEBUG_OBJECT (audiorate, "handle reset");
213 }
214
215 static gboolean
gst_audio_rate_setcaps(GstAudioRate * audiorate,GstCaps * caps)216 gst_audio_rate_setcaps (GstAudioRate * audiorate, GstCaps * caps)
217 {
218 GstAudioInfo info;
219 gint prev_rate = 0;
220
221 if (!gst_audio_info_from_caps (&info, caps))
222 goto wrong_caps;
223
224 prev_rate = audiorate->info.rate;
225 audiorate->info = info;
226
227 if (audiorate->next_offset >= 0 && prev_rate > 0 && prev_rate != info.rate) {
228 GST_DEBUG_OBJECT (audiorate,
229 "rate changed from %d to %d", prev_rate, info.rate);
230
231 /* calculate next_offset based on new rate value */
232 audiorate->next_offset =
233 gst_util_uint64_scale_int_round (audiorate->next_ts,
234 info.rate, GST_SECOND);
235 }
236
237 return TRUE;
238
239 /* ERRORS */
240 wrong_caps:
241 {
242 GST_DEBUG_OBJECT (audiorate, "could not parse caps");
243 return FALSE;
244 }
245 }
246
247 static void
gst_audio_rate_init(GstAudioRate * audiorate)248 gst_audio_rate_init (GstAudioRate * audiorate)
249 {
250 audiorate->sinkpad =
251 gst_pad_new_from_static_template (&gst_audio_rate_sink_template, "sink");
252 gst_pad_set_event_function (audiorate->sinkpad, gst_audio_rate_sink_event);
253 gst_pad_set_chain_function (audiorate->sinkpad, gst_audio_rate_chain);
254 GST_PAD_SET_PROXY_CAPS (audiorate->sinkpad);
255 gst_element_add_pad (GST_ELEMENT (audiorate), audiorate->sinkpad);
256
257 audiorate->srcpad =
258 gst_pad_new_from_static_template (&gst_audio_rate_src_template, "src");
259 gst_pad_set_event_function (audiorate->srcpad, gst_audio_rate_src_event);
260 GST_PAD_SET_PROXY_CAPS (audiorate->srcpad);
261 gst_element_add_pad (GST_ELEMENT (audiorate), audiorate->srcpad);
262
263 audiorate->in = 0;
264 audiorate->out = 0;
265 audiorate->drop = 0;
266 audiorate->add = 0;
267 audiorate->silent = DEFAULT_SILENT;
268 audiorate->tolerance = DEFAULT_TOLERANCE;
269 }
270
271 static void
gst_audio_rate_fill_to_time(GstAudioRate * audiorate,GstClockTime time)272 gst_audio_rate_fill_to_time (GstAudioRate * audiorate, GstClockTime time)
273 {
274 GstBuffer *buf;
275
276 GST_DEBUG_OBJECT (audiorate, "next_ts: %" GST_TIME_FORMAT
277 ", filling to %" GST_TIME_FORMAT, GST_TIME_ARGS (audiorate->next_ts),
278 GST_TIME_ARGS (time));
279
280 if (!GST_CLOCK_TIME_IS_VALID (time) ||
281 !GST_CLOCK_TIME_IS_VALID (audiorate->next_ts))
282 return;
283
284 /* feed an empty buffer to chain with the given timestamp,
285 * it will take care of filling */
286 buf = gst_buffer_new ();
287 GST_BUFFER_TIMESTAMP (buf) = time;
288 gst_audio_rate_chain (audiorate->sinkpad, GST_OBJECT_CAST (audiorate), buf);
289 }
290
/* Sink pad event handler.
 *
 * CAPS       -> parse into audiorate->info, forward on success.
 * FLUSH_STOP -> reset all stream state, forward.
 * SEGMENT    -> store the sink segment, invalidate next_offset/next_ts so
 *               the chain function resyncs; TIME segments are copied to the
 *               src segment and forwarded, other formats are converted
 *               lazily in _chain and the event is swallowed here.
 * EOS        -> fill with silence up to segment.stop (if known), forward.
 * GAP        -> fill with silence up to gap end, swallow the event.
 */
static gboolean
gst_audio_rate_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
  gboolean res;
  GstAudioRate *audiorate;

  audiorate = GST_AUDIO_RATE (parent);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      if ((res = gst_audio_rate_setcaps (audiorate, caps))) {
        res = gst_pad_push_event (audiorate->srcpad, event);
      } else {
        gst_event_unref (event);
      }
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_OBJECT (audiorate, "handling FLUSH_STOP");
      gst_audio_rate_reset (audiorate);
      res = gst_pad_push_event (audiorate->srcpad, event);
      break;
    case GST_EVENT_SEGMENT:
    {
      gst_event_copy_segment (event, &audiorate->sink_segment);

      GST_DEBUG_OBJECT (audiorate, "handle NEWSEGMENT");
#if 0
      /* FIXME: bad things will likely happen if rate < 0 ... */
      if (!update) {
        /* a new segment starts. We need to figure out what will be the next
         * sample offset. We mark the offsets as invalid so that the _chain
         * function will perform this calculation. */
        gst_audio_rate_fill_to_time (audiorate, audiorate->src_segment.stop);
#endif
        /* invalid offsets force a resync in the chain function */
        audiorate->next_offset = -1;
        audiorate->next_ts = -1;
#if 0
      } else {
        gst_audio_rate_fill_to_time (audiorate, audiorate->src_segment.start);
      }
#endif

      GST_DEBUG_OBJECT (audiorate, "updated segment: %" GST_SEGMENT_FORMAT,
          &audiorate->sink_segment);

      if (audiorate->sink_segment.format == GST_FORMAT_TIME) {
        /* TIME formats can be copied to src and forwarded */
        res = gst_pad_push_event (audiorate->srcpad, event);
        gst_segment_copy_into (&audiorate->sink_segment,
            &audiorate->src_segment);
      } else {
        /* other formats will be handled in the _chain function */
        gst_event_unref (event);
        res = TRUE;
      }
      break;
    }
    case GST_EVENT_EOS:
      /* Fill segment until the end */
      if (GST_CLOCK_TIME_IS_VALID (audiorate->src_segment.stop))
        gst_audio_rate_fill_to_time (audiorate, audiorate->src_segment.stop);
      res = gst_pad_push_event (audiorate->srcpad, event);
      break;
    case GST_EVENT_GAP:
    {
      /* Fill until end of gap */
      GstClockTime timestamp, duration;
      gst_event_parse_gap (event, &timestamp, &duration);
      gst_event_unref (event);
      if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
        if (GST_CLOCK_TIME_IS_VALID (duration))
          timestamp += duration;
        gst_audio_rate_fill_to_time (audiorate, timestamp);
      }
      res = TRUE;
      break;
    }
    default:
      res = gst_pad_event_default (pad, parent, event);
      break;
  }

  return res;
}
380
381 static gboolean
gst_audio_rate_src_event(GstPad * pad,GstObject * parent,GstEvent * event)382 gst_audio_rate_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
383 {
384 gboolean res;
385 GstAudioRate *audiorate;
386
387 audiorate = GST_AUDIO_RATE (parent);
388
389 switch (GST_EVENT_TYPE (event)) {
390 default:
391 res = gst_pad_push_event (audiorate->sinkpad, event);
392 break;
393 }
394
395 return res;
396 }
397
/* Convert @src_val from @src_fmt to @dest_fmt using the negotiated audio
 * info. Thin wrapper around gst_audio_info_convert(); note the guint64*
 * out-parameter is cast to gint64* as that API works on signed values. */
static gboolean
gst_audio_rate_convert (GstAudioRate * audiorate,
    GstFormat src_fmt, guint64 src_val, GstFormat dest_fmt, guint64 * dest_val)
{
  return gst_audio_info_convert (&audiorate->info, src_fmt, src_val, dest_fmt,
      (gint64 *) dest_val);
}
405
406
/* Populate the TIME-format src segment from the sink segment: rate/flags/
 * applied_rate are copied, positional fields (start/stop/time/base/position)
 * are converted from the sink segment's format to the src one's. */
static gboolean
gst_audio_rate_convert_segments (GstAudioRate * audiorate)
{
  GstFormat src_fmt, dst_fmt;

  src_fmt = audiorate->sink_segment.format;
  dst_fmt = audiorate->src_segment.format;

#define CONVERT_VAL(field) gst_audio_rate_convert (audiorate, \
		src_fmt, audiorate->sink_segment.field,       \
		dst_fmt, &audiorate->src_segment.field);

  /* non-positional fields are copied verbatim (sink takes src's values so
   * the two segments agree) */
  audiorate->sink_segment.rate = audiorate->src_segment.rate;
  audiorate->sink_segment.flags = audiorate->src_segment.flags;
  audiorate->sink_segment.applied_rate = audiorate->src_segment.applied_rate;
  CONVERT_VAL (start);
  CONVERT_VAL (stop);
  CONVERT_VAL (time);
  CONVERT_VAL (base);
  CONVERT_VAL (position);
#undef CONVERT_VAL

  return TRUE;
}
431
432 static void
gst_audio_rate_notify_drop(GstAudioRate * audiorate)433 gst_audio_rate_notify_drop (GstAudioRate * audiorate)
434 {
435 g_object_notify_by_pspec ((GObject *) audiorate, pspec_drop);
436 }
437
438 static void
gst_audio_rate_notify_add(GstAudioRate * audiorate)439 gst_audio_rate_notify_add (GstAudioRate * audiorate)
440 {
441 g_object_notify_by_pspec ((GObject *) audiorate, pspec_add);
442 }
443
/* Chain function: the heart of audiorate.
 *
 * Compares the buffer's timestamp (converted to a sample offset) with the
 * expected next offset and then either:
 *   - pushes it through shifted to the expected position if the deviation
 *     is within the tolerance,
 *   - inserts silence buffers (capped at one second each) to fill a gap,
 *   - drops or truncates the buffer if it overlaps already-output samples.
 * Always outputs a perfect stream: contiguous offsets and timestamps
 * derived from those offsets. Takes ownership of @buf.
 */
static GstFlowReturn
gst_audio_rate_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstAudioRate *audiorate;
  GstClockTime in_time;
  guint64 in_offset, in_offset_end, in_samples;
  guint in_size;
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTimeDiff diff;
  gint rate, bpf;
  GstAudioMeta *meta;

  audiorate = GST_AUDIO_RATE (parent);

  rate = GST_AUDIO_INFO_RATE (&audiorate->info);
  bpf = GST_AUDIO_INFO_BPF (&audiorate->info);

  /* need to be negotiated now */
  if (bpf == 0)
    goto not_negotiated;

  /* we have a new pending segment */
  if (audiorate->next_offset == -1) {
    gint64 pos;

    /* update the TIME segment */
    gst_audio_rate_convert_segments (audiorate);

    /* first buffer, we are negotiated and we have a segment, calculate the
     * current expected offsets based on the segment.start, which is the first
     * media time of the segment and should match the media time of the first
     * buffer in that segment, which is the offset expressed in DEFAULT units.
     */
    /* convert first timestamp of segment to sample position */
    pos = gst_util_uint64_scale_int_round (audiorate->src_segment.start,
        GST_AUDIO_INFO_RATE (&audiorate->info), GST_SECOND);

    GST_DEBUG_OBJECT (audiorate, "resync to offset %" G_GINT64_FORMAT, pos);

    /* resyncing is a discont */
    audiorate->discont = TRUE;

    audiorate->next_offset = pos;
    audiorate->next_ts =
        gst_util_uint64_scale_int_round (audiorate->next_offset, GST_SECOND,
        GST_AUDIO_INFO_RATE (&audiorate->info));

    /* skip-to-first overrides the segment-derived position with the first
     * buffer's own timestamp, so no leading silence is generated */
    if (audiorate->skip_to_first && GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
      GST_DEBUG_OBJECT (audiorate, "but skipping to first buffer instead");
      pos = gst_util_uint64_scale_int_round (GST_BUFFER_TIMESTAMP (buf),
          GST_AUDIO_INFO_RATE (&audiorate->info), GST_SECOND);
      GST_DEBUG_OBJECT (audiorate, "so resync to offset %" G_GINT64_FORMAT,
          pos);
      audiorate->next_offset = pos;
      audiorate->next_ts = GST_BUFFER_TIMESTAMP (buf);
    }
  }

  /* untimestamped buffers are assumed to continue the perfect stream */
  in_time = GST_BUFFER_TIMESTAMP (buf);
  if (in_time == GST_CLOCK_TIME_NONE) {
    GST_DEBUG_OBJECT (audiorate, "no timestamp, using expected next time");
    in_time = audiorate->next_ts;
  }

  /* sample count: prefer the GstAudioMeta (needed for non-interleaved
   * layouts) and fall back to size / bytes-per-frame */
  meta = gst_buffer_get_audio_meta (buf);
  in_size = gst_buffer_get_size (buf);
  in_samples = meta ? meta->samples : in_size / bpf;
  audiorate->in += in_samples;

  /* calculate the buffer offset */
  in_offset = gst_util_uint64_scale_int_round (in_time, rate, GST_SECOND);
  in_offset_end = in_offset + in_samples;

  GST_LOG_OBJECT (audiorate,
      "in_time:%" GST_TIME_FORMAT ", in_duration:%" GST_TIME_FORMAT
      ", in_size:%u, in_offset:%" G_GUINT64_FORMAT ", in_offset_end:%"
      G_GUINT64_FORMAT ", ->next_offset:%" G_GUINT64_FORMAT ", ->next_ts:%"
      GST_TIME_FORMAT, GST_TIME_ARGS (in_time),
      GST_TIME_ARGS (GST_FRAMES_TO_CLOCK_TIME (in_samples, rate)),
      in_size, in_offset, in_offset_end, audiorate->next_offset,
      GST_TIME_ARGS (audiorate->next_ts));

  /* signed difference between actual and expected timestamp */
  diff = in_time - audiorate->next_ts;
  if (diff <= (GstClockTimeDiff) audiorate->tolerance &&
      diff >= (GstClockTimeDiff) - audiorate->tolerance) {
    /* buffer time close enough to expected time,
     * so produce a perfect stream by simply 'shifting'
     * it to next ts and offset and sending */
    GST_LOG_OBJECT (audiorate, "within tolerance %" GST_TIME_FORMAT,
        GST_TIME_ARGS (audiorate->tolerance));
    /* The outgoing buffer's offset will be set to ->next_offset, we also
     * need to adjust the offset_end value accordingly */
    in_offset_end = audiorate->next_offset + in_samples;
    audiorate->out += in_samples;
    goto send;
  }

  /* do we need to insert samples */
  if (in_offset > audiorate->next_offset) {
    GstBuffer *fill;
    gint fillsize;
    guint64 fillsamples;

    /* We don't want to allocate a single unreasonably huge buffer - it might
       be hundreds of megabytes. So, limit each output buffer to one second of
       audio */
    fillsamples = in_offset - audiorate->next_offset;

    while (fillsamples > 0) {
      guint64 cursamples = MIN (fillsamples, rate);
      GstMapInfo fillmap;

      fillsamples -= cursamples;
      fillsize = cursamples * bpf;

      fill = gst_buffer_new_and_alloc (fillsize);

      /* silence for the negotiated format (not necessarily zero bytes,
       * e.g. for unsigned formats) */
      gst_buffer_map (fill, &fillmap, GST_MAP_WRITE);
      gst_audio_format_fill_silence (audiorate->info.finfo, fillmap.data,
          fillmap.size);
      gst_buffer_unmap (fill, &fillmap);

      if (audiorate->info.layout == GST_AUDIO_LAYOUT_NON_INTERLEAVED) {
        gst_buffer_add_audio_meta (fill, &audiorate->info, cursamples, NULL);
      }

      GST_DEBUG_OBJECT (audiorate, "inserting %" G_GUINT64_FORMAT " samples",
          cursamples);

      GST_BUFFER_OFFSET (fill) = audiorate->next_offset;
      audiorate->next_offset += cursamples;
      GST_BUFFER_OFFSET_END (fill) = audiorate->next_offset;

      /* Use next timestamp, then calculate following timestamp based on
       * offset to get duration. Necessary complexity to get 'perfect'
       * streams */
      GST_BUFFER_TIMESTAMP (fill) = audiorate->next_ts;
      audiorate->next_ts =
          gst_util_uint64_scale_int_round (audiorate->next_offset, GST_SECOND,
          rate);
      GST_BUFFER_DURATION (fill) =
          audiorate->next_ts - GST_BUFFER_TIMESTAMP (fill);

      /* we created this buffer to fill a gap */
      GST_BUFFER_FLAG_SET (fill, GST_BUFFER_FLAG_GAP);
      /* set discont if it's pending, this is mostly done for the first buffer
       * and after a flushing seek */
      if (audiorate->discont) {
        GST_BUFFER_FLAG_SET (fill, GST_BUFFER_FLAG_DISCONT);
        audiorate->discont = FALSE;
      }

      /* clip to the configured segment; may consume the buffer entirely */
      fill = gst_audio_buffer_clip (fill, &audiorate->src_segment, rate, bpf);
      if (fill)
        ret = gst_pad_push (audiorate->srcpad, fill);

      if (ret != GST_FLOW_OK)
        goto beach;
      audiorate->out += cursamples;
      audiorate->add += cursamples;

      if (!audiorate->silent)
        gst_audio_rate_notify_add (audiorate);
    }

  } else if (in_offset < audiorate->next_offset) {
    /* need to remove samples */
    if (in_offset_end <= audiorate->next_offset) {
      guint64 drop = in_samples;

      audiorate->drop += drop;

      GST_DEBUG_OBJECT (audiorate, "dropping %" G_GUINT64_FORMAT " samples",
          drop);

      /* we can drop the buffer completely */
      gst_buffer_unref (buf);
      buf = NULL;

      if (!audiorate->silent)
        gst_audio_rate_notify_drop (audiorate);

      goto beach;
    } else {
      guint64 truncsamples, leftsamples;

      /* truncate buffer */
      truncsamples = audiorate->next_offset - in_offset;
      leftsamples = in_samples - truncsamples;

      buf = gst_audio_buffer_truncate (buf, bpf, truncsamples, leftsamples);

      audiorate->drop += truncsamples;
      audiorate->out += leftsamples;
      GST_DEBUG_OBJECT (audiorate, "truncating %" G_GUINT64_FORMAT " samples",
          truncsamples);

      if (!audiorate->silent)
        gst_audio_rate_notify_drop (audiorate);
    }
  }

send:
  /* empty buffers (e.g. from _fill_to_time) carry no payload to push */
  if (gst_buffer_get_size (buf) == 0)
    goto beach;

  /* Now calculate parameters for whichever buffer (either the original
   * or truncated one) we're pushing. */
  GST_BUFFER_OFFSET (buf) = audiorate->next_offset;
  GST_BUFFER_OFFSET_END (buf) = in_offset_end;

  GST_BUFFER_TIMESTAMP (buf) = audiorate->next_ts;
  audiorate->next_ts = gst_util_uint64_scale_int_round (in_offset_end,
      GST_SECOND, rate);
  GST_BUFFER_DURATION (buf) = audiorate->next_ts - GST_BUFFER_TIMESTAMP (buf);

  if (audiorate->discont) {
    /* we need to output a discont buffer, do so now */
    GST_DEBUG_OBJECT (audiorate, "marking DISCONT on output buffer");
    buf = gst_buffer_make_writable (buf);
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    audiorate->discont = FALSE;
  } else if (GST_BUFFER_IS_DISCONT (buf)) {
    /* else we make everything continuous so we can safely remove the DISCONT
     * flag from the buffer if there was one */
    GST_DEBUG_OBJECT (audiorate, "removing DISCONT from buffer");
    buf = gst_buffer_make_writable (buf);
    GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
  }

  buf = gst_audio_buffer_clip (buf, &audiorate->src_segment, rate, bpf);
  if (buf) {
    /* set last_stop on segment */
    audiorate->src_segment.position =
        GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);

    ret = gst_pad_push (audiorate->srcpad, buf);
  }
  buf = NULL;

  audiorate->next_offset = in_offset_end;
beach:

  if (buf)
    gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
not_negotiated:
  {
    gst_buffer_unref (buf);

    GST_ELEMENT_ERROR (audiorate, STREAM, FORMAT,
        (NULL), ("pipeline error, format was not negotiated"));
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
702
703 static void
gst_audio_rate_set_property(GObject * object,guint prop_id,const GValue * value,GParamSpec * pspec)704 gst_audio_rate_set_property (GObject * object,
705 guint prop_id, const GValue * value, GParamSpec * pspec)
706 {
707 GstAudioRate *audiorate = GST_AUDIO_RATE (object);
708
709 switch (prop_id) {
710 case PROP_SILENT:
711 audiorate->silent = g_value_get_boolean (value);
712 break;
713 case PROP_TOLERANCE:
714 audiorate->tolerance = g_value_get_uint64 (value);
715 break;
716 case PROP_SKIP_TO_FIRST:
717 audiorate->skip_to_first = g_value_get_boolean (value);
718 break;
719 default:
720 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
721 break;
722 }
723 }
724
725 static void
gst_audio_rate_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)726 gst_audio_rate_get_property (GObject * object,
727 guint prop_id, GValue * value, GParamSpec * pspec)
728 {
729 GstAudioRate *audiorate = GST_AUDIO_RATE (object);
730
731 switch (prop_id) {
732 case PROP_IN:
733 g_value_set_uint64 (value, audiorate->in);
734 break;
735 case PROP_OUT:
736 g_value_set_uint64 (value, audiorate->out);
737 break;
738 case PROP_ADD:
739 g_value_set_uint64 (value, audiorate->add);
740 break;
741 case PROP_DROP:
742 g_value_set_uint64 (value, audiorate->drop);
743 break;
744 case PROP_SILENT:
745 g_value_set_boolean (value, audiorate->silent);
746 break;
747 case PROP_TOLERANCE:
748 g_value_set_uint64 (value, audiorate->tolerance);
749 break;
750 case PROP_SKIP_TO_FIRST:
751 g_value_set_boolean (value, audiorate->skip_to_first);
752 break;
753 default:
754 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
755 break;
756 }
757 }
758
759 static GstStateChangeReturn
gst_audio_rate_change_state(GstElement * element,GstStateChange transition)760 gst_audio_rate_change_state (GstElement * element, GstStateChange transition)
761 {
762 GstAudioRate *audiorate = GST_AUDIO_RATE (element);
763
764 switch (transition) {
765 case GST_STATE_CHANGE_PAUSED_TO_READY:
766 break;
767 case GST_STATE_CHANGE_READY_TO_PAUSED:
768 audiorate->in = 0;
769 audiorate->out = 0;
770 audiorate->drop = 0;
771 audiorate->add = 0;
772 gst_audio_info_init (&audiorate->info);
773 gst_audio_rate_reset (audiorate);
774 break;
775 default:
776 break;
777 }
778
779 return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
780 }
781
/* Plugin entry point: set up the debug category and register the
 * "audiorate" element with the default (NONE) rank. */
static gboolean
plugin_init (GstPlugin * plugin)
{
  GST_DEBUG_CATEGORY_INIT (audio_rate_debug, "audiorate", 0,
      "AudioRate stream fixer");

  return gst_element_register (plugin, "audiorate", GST_RANK_NONE,
      GST_TYPE_AUDIO_RATE);
}
791
/* Plugin descriptor; VERSION/GST_LICENSE/etc. come from the build config */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    audiorate,
    "Adjusts audio frames",
    plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)
797