1 /*
2  * WebRTC Audio Processing Elements
3  *
4  *  Copyright 2016 Collabora Ltd
5  *    @author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
20  *
21  */
22 
23 /**
24  * SECTION:element-webrtcdsp
25  * @short_description: Audio Filter using WebRTC Audio Processing library
26  *
27  * A voice enhancement filter based on WebRTC Audio Processing library. This
28  * library provides a wide variety of enhancement algorithms. This element
29  * tries to enable as many of them as possible. The currently enabled
30  * enhancements are High Pass Filter, Echo Canceller, Noise Suppression,
31  * Automatic Gain Control, and some extended filters.
32  *
33  * While the webrtcdsp element can be used alone, the echo canceller is an
34  * exception. The echo canceller needs to be aware of the far end streams that
35  * are played to the loudspeakers. For this, you must place a webrtcechoprobe
36  * element at that far end. Note that the sample rate must match between
37  * webrtcdsp and webrtcechoprobe, though the number of channels can differ.
38  * The probe is found by the DSP element using its object name. By default,
39  * webrtcdsp looks for webrtcechoprobe0, which means it just works if you have
40  * a single probe and DSP.
41  *
42  * The probe can only be used within the same top level GstPipeline.
43  * Additionally, to simplify the code, the probe element must be created
44  * before the DSP sink pad is activated. It does not need to be in any
45  * particular state and does not even need to be added to the pipeline yet.
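 *
 * When more than one probe and DSP pair is needed, the probe to use can be
 * selected explicitly through the "probe" property. The following is a
 * minimal sketch of doing this from application code; the probe name used
 * here ("probe0") is only illustrative.
 *
 * |[
 * GstElement *probe, *dsp;
 *
 * probe = gst_element_factory_make ("webrtcechoprobe", "probe0");
 * dsp = gst_element_factory_make ("webrtcdsp", NULL);
 * g_object_set (dsp, "probe", "probe0", NULL);
 * ]|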
46  *
47  * # Example launch line
48  *
49  * As a convenience, the echo canceller can be tested using an echo loop. In
50  * this configuration, one would expect a single echo to be heard.
51  *
52  * |[
53  * gst-launch-1.0 pulsesrc ! webrtcdsp ! webrtcechoprobe ! pulsesink
54  * ]|
55  *
56  * In a real environment, you'll place the probe right before the playback,
57  * but only the far end streams should go through it. The DSP should be placed
58  * as close as possible to the audio capture. The following pipeline is
59  * abstracted and does not represent a real pipeline.
60  *
61  * |[
62  * gst-launch-1.0 far-end-src ! audio/x-raw,rate=48000 ! webrtcechoprobe ! pulsesink \
63  *                pulsesrc ! audio/x-raw,rate=48000 ! webrtcdsp ! far-end-sink
64  * ]|
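 *
 * When the voice-detection property is enabled, the element posts
 * "voice-activity" element messages on the bus whenever the detection state
 * changes, carrying a "stream-has-voice" boolean and a "stream-time" field.
 * Below is a minimal sketch of a bus callback reading that message; the
 * callback name is only illustrative.
 *
 * |[
 * static gboolean
 * on_bus_message (GstBus * bus, GstMessage * msg, gpointer user_data)
 * {
 *   if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
 *     const GstStructure *s = gst_message_get_structure (msg);
 *     gboolean has_voice;
 *
 *     if (gst_structure_has_name (s, "voice-activity") &&
 *         gst_structure_get_boolean (s, "stream-has-voice", &has_voice))
 *       g_print ("Stream %s voice\n", has_voice ? "has" : "does not have");
 *   }
 *
 *   return TRUE;
 * }
 * ]|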
65  */
66 
67 #ifdef HAVE_CONFIG_H
68 #include "config.h"
69 #endif
70 
71 #include "gstwebrtcdsp.h"
72 #include "gstwebrtcechoprobe.h"
73 
74 #include <webrtc/modules/audio_processing/include/audio_processing.h>
75 #include <webrtc/modules/interface/module_common_types.h>
76 #include <webrtc/system_wrappers/include/trace.h>
77 
78 GST_DEBUG_CATEGORY (webrtc_dsp_debug);
79 #define GST_CAT_DEFAULT (webrtc_dsp_debug)
80 
81 #define DEFAULT_TARGET_LEVEL_DBFS 3
82 #define DEFAULT_COMPRESSION_GAIN_DB 9
83 #define DEFAULT_STARTUP_MIN_VOLUME 12
84 #define DEFAULT_LIMITER TRUE
85 #define DEFAULT_GAIN_CONTROL_MODE webrtc::GainControl::kAdaptiveDigital
86 #define DEFAULT_VOICE_DETECTION FALSE
87 #define DEFAULT_VOICE_DETECTION_FRAME_SIZE_MS 10
88 #define DEFAULT_VOICE_DETECTION_LIKELIHOOD webrtc::VoiceDetection::kLowLikelihood
89 
90 static GstStaticPadTemplate gst_webrtc_dsp_sink_template =
91 GST_STATIC_PAD_TEMPLATE ("sink",
92     GST_PAD_SINK,
93     GST_PAD_ALWAYS,
94     GST_STATIC_CAPS ("audio/x-raw, "
95         "format = (string) " GST_AUDIO_NE (S16) ", "
96         "layout = (string) interleaved, "
97         "rate = (int) { 48000, 32000, 16000, 8000 }, "
98         "channels = (int) [1, MAX];"
99         "audio/x-raw, "
100         "format = (string) " GST_AUDIO_NE (F32) ", "
101         "layout = (string) non-interleaved, "
102         "rate = (int) { 48000, 32000, 16000, 8000 }, "
103         "channels = (int) [1, MAX]")
104     );
105 
106 static GstStaticPadTemplate gst_webrtc_dsp_src_template =
107 GST_STATIC_PAD_TEMPLATE ("src",
108     GST_PAD_SRC,
109     GST_PAD_ALWAYS,
110     GST_STATIC_CAPS ("audio/x-raw, "
111         "format = (string) " GST_AUDIO_NE (S16) ", "
112         "layout = (string) interleaved, "
113         "rate = (int) { 48000, 32000, 16000, 8000 }, "
114         "channels = (int) [1, MAX];"
115         "audio/x-raw, "
116         "format = (string) " GST_AUDIO_NE (F32) ", "
117         "layout = (string) non-interleaved, "
118         "rate = (int) { 48000, 32000, 16000, 8000 }, "
119         "channels = (int) [1, MAX]")
120     );
121 
122 typedef webrtc::EchoCancellation::SuppressionLevel GstWebrtcEchoSuppressionLevel;
123 #define GST_TYPE_WEBRTC_ECHO_SUPPRESSION_LEVEL \
124     (gst_webrtc_echo_suppression_level_get_type ())
125 static GType
126 gst_webrtc_echo_suppression_level_get_type (void)
127 {
128   static GType suppression_level_type = 0;
129   static const GEnumValue level_types[] = {
130     {webrtc::EchoCancellation::kLowSuppression, "Low Suppression", "low"},
131     {webrtc::EchoCancellation::kModerateSuppression,
132       "Moderate Suppression", "moderate"},
133     {webrtc::EchoCancellation::kHighSuppression, "High Suppression", "high"},
134     {0, NULL, NULL}
135   };
136 
137   if (!suppression_level_type) {
138     suppression_level_type =
139         g_enum_register_static ("GstWebrtcEchoSuppressionLevel", level_types);
140   }
141   return suppression_level_type;
142 }
143 
144 typedef webrtc::NoiseSuppression::Level GstWebrtcNoiseSuppressionLevel;
145 #define GST_TYPE_WEBRTC_NOISE_SUPPRESSION_LEVEL \
146     (gst_webrtc_noise_suppression_level_get_type ())
147 static GType
148 gst_webrtc_noise_suppression_level_get_type (void)
149 {
150   static GType suppression_level_type = 0;
151   static const GEnumValue level_types[] = {
152     {webrtc::NoiseSuppression::kLow, "Low Suppression", "low"},
153     {webrtc::NoiseSuppression::kModerate, "Moderate Suppression", "moderate"},
154     {webrtc::NoiseSuppression::kHigh, "High Suppression", "high"},
155     {webrtc::NoiseSuppression::kVeryHigh, "Very High Suppression",
156       "very-high"},
157     {0, NULL, NULL}
158   };
159 
160   if (!suppression_level_type) {
161     suppression_level_type =
162         g_enum_register_static ("GstWebrtcNoiseSuppressionLevel", level_types);
163   }
164   return suppression_level_type;
165 }
166 
167 typedef webrtc::GainControl::Mode GstWebrtcGainControlMode;
168 #define GST_TYPE_WEBRTC_GAIN_CONTROL_MODE \
169     (gst_webrtc_gain_control_mode_get_type ())
170 static GType
171 gst_webrtc_gain_control_mode_get_type (void)
172 {
173   static GType gain_control_mode_type = 0;
174   static const GEnumValue mode_types[] = {
175     {webrtc::GainControl::kAdaptiveDigital, "Adaptive Digital", "adaptive-digital"},
176     {webrtc::GainControl::kFixedDigital, "Fixed Digital", "fixed-digital"},
177     {0, NULL, NULL}
178   };
179 
180   if (!gain_control_mode_type) {
181     gain_control_mode_type =
182         g_enum_register_static ("GstWebrtcGainControlMode", mode_types);
183   }
184   return gain_control_mode_type;
185 }
186 
187 typedef webrtc::VoiceDetection::Likelihood GstWebrtcVoiceDetectionLikelihood;
188 #define GST_TYPE_WEBRTC_VOICE_DETECTION_LIKELIHOOD \
189     (gst_webrtc_voice_detection_likelihood_get_type ())
190 static GType
191 gst_webrtc_voice_detection_likelihood_get_type (void)
192 {
193   static GType likelihood_type = 0;
194   static const GEnumValue likelihood_types[] = {
195     {webrtc::VoiceDetection::kVeryLowLikelihood, "Very Low Likelihood", "very-low"},
196     {webrtc::VoiceDetection::kLowLikelihood, "Low Likelihood", "low"},
197     {webrtc::VoiceDetection::kModerateLikelihood, "Moderate Likelihood", "moderate"},
198     {webrtc::VoiceDetection::kHighLikelihood, "High Likelihood", "high"},
199     {0, NULL, NULL}
200   };
201 
202   if (!likelihood_type) {
203     likelihood_type =
204         g_enum_register_static ("GstWebrtcVoiceDetectionLikelihood", likelihood_types);
205   }
206   return likelihood_type;
207 }
208 
209 enum
210 {
211   PROP_0,
212   PROP_PROBE,
213   PROP_HIGH_PASS_FILTER,
214   PROP_ECHO_CANCEL,
215   PROP_ECHO_SUPPRESSION_LEVEL,
216   PROP_NOISE_SUPPRESSION,
217   PROP_NOISE_SUPPRESSION_LEVEL,
218   PROP_GAIN_CONTROL,
219   PROP_EXPERIMENTAL_AGC,
220   PROP_EXTENDED_FILTER,
221   PROP_DELAY_AGNOSTIC,
222   PROP_TARGET_LEVEL_DBFS,
223   PROP_COMPRESSION_GAIN_DB,
224   PROP_STARTUP_MIN_VOLUME,
225   PROP_LIMITER,
226   PROP_GAIN_CONTROL_MODE,
227   PROP_VOICE_DETECTION,
228   PROP_VOICE_DETECTION_FRAME_SIZE_MS,
229   PROP_VOICE_DETECTION_LIKELIHOOD,
230 };
231 
232 /**
233  * GstWebrtcDsp:
234  *
235  * The webrtcdsp object structure.
236  */
237 struct _GstWebrtcDsp
238 {
239   GstAudioFilter element;
240 
241   /* Protected by the object lock */
242   GstAudioInfo info;
243   gboolean interleaved;
244   guint period_size;
245   guint period_samples;
246   gboolean stream_has_voice;
247 
248   /* Protected by the stream lock */
249   GstAdapter *adapter;
250   GstPlanarAudioAdapter *padapter;
251   webrtc::AudioProcessing * apm;
252 
253   /* Protected by the object lock */
254   gchar *probe_name;
255   GstWebrtcEchoProbe *probe;
256 
257   /* Properties */
258   gboolean high_pass_filter;
259   gboolean echo_cancel;
260   webrtc::EchoCancellation::SuppressionLevel echo_suppression_level;
261   gboolean noise_suppression;
262   webrtc::NoiseSuppression::Level noise_suppression_level;
263   gboolean gain_control;
264   gboolean experimental_agc;
265   gboolean extended_filter;
266   gboolean delay_agnostic;
267   gint target_level_dbfs;
268   gint compression_gain_db;
269   gint startup_min_volume;
270   gboolean limiter;
271   webrtc::GainControl::Mode gain_control_mode;
272   gboolean voice_detection;
273   gint voice_detection_frame_size_ms;
274   webrtc::VoiceDetection::Likelihood voice_detection_likelihood;
275 };
276 
277 G_DEFINE_TYPE (GstWebrtcDsp, gst_webrtc_dsp, GST_TYPE_AUDIO_FILTER);
278 
279 static const gchar *
280 webrtc_error_to_string (gint err)
281 {
282   const gchar *str = "unknown error";
283 
284   switch (err) {
285     case webrtc::AudioProcessing::kNoError:
286       str = "success";
287       break;
288     case webrtc::AudioProcessing::kUnspecifiedError:
289       str = "unspecified error";
290       break;
291     case webrtc::AudioProcessing::kCreationFailedError:
292       str = "creation failed";
293       break;
294     case webrtc::AudioProcessing::kUnsupportedComponentError:
295       str = "unsupported component";
296       break;
297     case webrtc::AudioProcessing::kUnsupportedFunctionError:
298       str = "unsupported function";
299       break;
300     case webrtc::AudioProcessing::kNullPointerError:
301       str = "null pointer";
302       break;
303     case webrtc::AudioProcessing::kBadParameterError:
304       str = "bad parameter";
305       break;
306     case webrtc::AudioProcessing::kBadSampleRateError:
307       str = "bad sample rate";
308       break;
309     case webrtc::AudioProcessing::kBadDataLengthError:
310       str = "bad data length";
311       break;
312     case webrtc::AudioProcessing::kBadNumberChannelsError:
313       str = "bad number of channels";
314       break;
315     case webrtc::AudioProcessing::kFileError:
316       str = "file IO error";
317       break;
318     case webrtc::AudioProcessing::kStreamParameterNotSetError:
319       str = "stream parameter not set";
320       break;
321     case webrtc::AudioProcessing::kNotEnabledError:
322       str = "not enabled";
323       break;
324     default:
325       break;
326   }
327 
328   return str;
329 }
330 
331 static GstBuffer *
332 gst_webrtc_dsp_take_buffer (GstWebrtcDsp * self)
333 {
334   GstBuffer *buffer;
335   GstClockTime timestamp;
336   guint64 distance;
337   gboolean at_discont;
338 
339   if (self->interleaved) {
340     timestamp = gst_adapter_prev_pts (self->adapter, &distance);
341     distance /= self->info.bpf;
342   } else {
343     timestamp = gst_planar_audio_adapter_prev_pts (self->padapter, &distance);
344   }
345 
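  /* The output buffer timestamp is the adapter's last known PTS advanced by
   * the number of samples already consumed since that PTS. */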
346   timestamp += gst_util_uint64_scale_int (distance, GST_SECOND, self->info.rate);
347 
348   if (self->interleaved) {
349     buffer = gst_adapter_take_buffer (self->adapter, self->period_size);
350     at_discont = (gst_adapter_pts_at_discont (self->adapter) == timestamp);
351   } else {
352     buffer = gst_planar_audio_adapter_take_buffer (self->padapter,
353         self->period_samples, GST_MAP_READWRITE);
354     at_discont =
355         (gst_planar_audio_adapter_pts_at_discont (self->padapter) == timestamp);
356   }
357 
358   GST_BUFFER_PTS (buffer) = timestamp;
359   GST_BUFFER_DURATION (buffer) = 10 * GST_MSECOND;
360 
361   if (at_discont && distance == 0) {
362     GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
363   } else {
364     GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DISCONT);
365   }
366 
367   return buffer;
368 }
369 
370 static GstFlowReturn
371 gst_webrtc_dsp_analyze_reverse_stream (GstWebrtcDsp * self,
372     GstClockTime rec_time)
373 {
374   GstWebrtcEchoProbe *probe = NULL;
375   webrtc::AudioProcessing * apm;
376   webrtc::AudioFrame frame;
377   GstBuffer *buf = NULL;
378   GstFlowReturn ret = GST_FLOW_OK;
379   gint err, delay;
380 
381   GST_OBJECT_LOCK (self);
382   if (self->echo_cancel)
383     probe = GST_WEBRTC_ECHO_PROBE (g_object_ref (self->probe));
384   GST_OBJECT_UNLOCK (self);
385 
386   /* If echo cancellation is disabled */
387   if (!probe)
388     return GST_FLOW_OK;
389 
390   apm = self->apm;
391 
392   if (self->delay_agnostic)
393     rec_time = GST_CLOCK_TIME_NONE;
394 
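  /* In delay-agnostic mode rec_time is ignored (GST_CLOCK_TIME_NONE) and the
   * probe is drained completely by looping below; otherwise a single 10ms
   * far end frame matching rec_time is read. */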
395 again:
396   delay = gst_webrtc_echo_probe_read (probe, rec_time, (gpointer) &frame, &buf);
397   apm->set_stream_delay_ms (delay);
398 
399   if (delay < 0)
400     goto done;
401 
402   if (frame.sample_rate_hz_ != self->info.rate) {
403     GST_ELEMENT_ERROR (self, STREAM, FORMAT,
404         ("Echo Probe has rate %i, while the DSP is running at rate %i,"
405          " use a caps filter to ensure those are the same.",
406          frame.sample_rate_hz_, self->info.rate), (NULL));
407     ret = GST_FLOW_ERROR;
408     goto done;
409   }
410 
411   if (buf) {
412     webrtc::StreamConfig config (frame.sample_rate_hz_, frame.num_channels_,
413         false);
414     GstAudioBuffer abuf;
415     float * const * data;
416 
417     gst_audio_buffer_map (&abuf, &self->info, buf, GST_MAP_READWRITE);
418     data = (float * const *) abuf.planes;
419     if ((err = apm->ProcessReverseStream (data, config, config, data)) < 0)
420       GST_WARNING_OBJECT (self, "Reverse stream analysis failed: %s.",
421           webrtc_error_to_string (err));
422     gst_audio_buffer_unmap (&abuf);
423     gst_buffer_replace (&buf, NULL);
424   } else {
425     if ((err = apm->AnalyzeReverseStream (&frame)) < 0)
426       GST_WARNING_OBJECT (self, "Reverse stream analysis failed: %s.",
427           webrtc_error_to_string (err));
428   }
429 
430   if (self->delay_agnostic)
431       goto again;
432 
433 done:
434   gst_object_unref (probe);
435   gst_buffer_replace (&buf, NULL);
436 
437   return ret;
438 }
439 
440 static void
441 gst_webrtc_vad_post_message (GstWebrtcDsp *self, GstClockTime timestamp,
442     gboolean stream_has_voice)
443 {
444   GstBaseTransform *trans = GST_BASE_TRANSFORM_CAST (self);
445   GstStructure *s;
446   GstClockTime stream_time;
447 
448   stream_time = gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME,
449       timestamp);
450 
451   s = gst_structure_new ("voice-activity",
452       "stream-time", G_TYPE_UINT64, stream_time,
453       "stream-has-voice", G_TYPE_BOOLEAN, stream_has_voice, NULL);
454 
455   GST_LOG_OBJECT (self, "Posting voice activity message, stream %s voice",
456       stream_has_voice ? "now has" : "no longer has");
457 
458   gst_element_post_message (GST_ELEMENT (self),
459       gst_message_new_element (GST_OBJECT (self), s));
460 }
461 
462 static GstFlowReturn
463 gst_webrtc_dsp_process_stream (GstWebrtcDsp * self,
464     GstBuffer * buffer)
465 {
466   GstAudioBuffer abuf;
467   webrtc::AudioProcessing * apm = self->apm;
468   gint err;
469 
470   if (!gst_audio_buffer_map (&abuf, &self->info, buffer,
471           (GstMapFlags) GST_MAP_READWRITE)) {
472     gst_buffer_unref (buffer);
473     return GST_FLOW_ERROR;
474   }
475 
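  /* Interleaved S16 data is copied through a webrtc::AudioFrame, while
   * non-interleaved F32 data is processed in place as planar float samples. */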
476   if (self->interleaved) {
477     webrtc::AudioFrame frame;
478     frame.num_channels_ = self->info.channels;
479     frame.sample_rate_hz_ = self->info.rate;
480     frame.samples_per_channel_ = self->period_samples;
481 
482     memcpy (frame.data_, abuf.planes[0], self->period_size);
483     err = apm->ProcessStream (&frame);
484     if (err >= 0)
485       memcpy (abuf.planes[0], frame.data_, self->period_size);
486   } else {
487     float * const * data = (float * const *) abuf.planes;
488     webrtc::StreamConfig config (self->info.rate, self->info.channels, false);
489 
490     err = apm->ProcessStream (data, config, config, data);
491   }
492 
493   if (err < 0) {
494     GST_WARNING_OBJECT (self, "Failed to filter the audio: %s.",
495         webrtc_error_to_string (err));
496   } else {
497     if (self->voice_detection) {
498       gboolean stream_has_voice = apm->voice_detection ()->stream_has_voice ();
499 
500       if (stream_has_voice != self->stream_has_voice)
501         gst_webrtc_vad_post_message (self, GST_BUFFER_PTS (buffer), stream_has_voice);
502 
503       self->stream_has_voice = stream_has_voice;
504     }
505   }
506 
507   gst_audio_buffer_unmap (&abuf);
508 
509   return GST_FLOW_OK;
510 }
511 
512 static GstFlowReturn
513 gst_webrtc_dsp_submit_input_buffer (GstBaseTransform * btrans,
514     gboolean is_discont, GstBuffer * buffer)
515 {
516   GstWebrtcDsp *self = GST_WEBRTC_DSP (btrans);
517 
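  /* Timestamps are converted to running time so that this near end stream can
   * be matched against the far end stream queued by the echo probe. */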
518   buffer = gst_buffer_make_writable (buffer);
519   GST_BUFFER_PTS (buffer) = gst_segment_to_running_time (&btrans->segment,
520       GST_FORMAT_TIME, GST_BUFFER_PTS (buffer));
521 
522   if (is_discont) {
523     GST_DEBUG_OBJECT (self,
524         "Received discont, clearing adapter.");
525     if (self->interleaved)
526       gst_adapter_clear (self->adapter);
527     else
528       gst_planar_audio_adapter_clear (self->padapter);
529   }
530 
531   if (self->interleaved)
532     gst_adapter_push (self->adapter, buffer);
533   else
534     gst_planar_audio_adapter_push (self->padapter, buffer);
535 
536   return GST_FLOW_OK;
537 }
538 
539 static GstFlowReturn
540 gst_webrtc_dsp_generate_output (GstBaseTransform * btrans, GstBuffer ** outbuf)
541 {
542   GstWebrtcDsp *self = GST_WEBRTC_DSP (btrans);
543   GstFlowReturn ret;
544   gboolean not_enough;
545 
546   if (self->interleaved)
547     not_enough = gst_adapter_available (self->adapter) < self->period_size;
548   else
549     not_enough = gst_planar_audio_adapter_available (self->padapter) <
550         self->period_samples;
551 
552   if (not_enough) {
553     *outbuf = NULL;
554     return GST_FLOW_OK;
555   }
556 
557   *outbuf = gst_webrtc_dsp_take_buffer (self);
558   ret = gst_webrtc_dsp_analyze_reverse_stream (self, GST_BUFFER_PTS (*outbuf));
559 
560   if (ret == GST_FLOW_OK)
561     ret = gst_webrtc_dsp_process_stream (self, *outbuf);
562 
563   return ret;
564 }
565 
566 static gboolean
567 gst_webrtc_dsp_start (GstBaseTransform * btrans)
568 {
569   GstWebrtcDsp *self = GST_WEBRTC_DSP (btrans);
570   webrtc::Config config;
571 
572   GST_OBJECT_LOCK (self);
573 
574   config.Set < webrtc::ExtendedFilter >
575       (new webrtc::ExtendedFilter (self->extended_filter));
576   config.Set < webrtc::ExperimentalAgc >
577       (new webrtc::ExperimentalAgc (self->experimental_agc, self->startup_min_volume));
578   config.Set < webrtc::DelayAgnostic >
579       (new webrtc::DelayAgnostic (self->delay_agnostic));
580 
581   /* TODO Intelligibility enhancer, Beamforming, etc. */
582 
583   self->apm = webrtc::AudioProcessing::Create (config);
584 
585   if (self->echo_cancel) {
586     self->probe = gst_webrtc_acquire_echo_probe (self->probe_name);
587 
588     if (self->probe == NULL) {
589       GST_OBJECT_UNLOCK (self);
590       GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
591           ("No echo probe with name %s found.", self->probe_name), (NULL));
592       return FALSE;
593     }
594   }
595 
596   GST_OBJECT_UNLOCK (self);
597 
598   return TRUE;
599 }
600 
601 static gboolean
602 gst_webrtc_dsp_setup (GstAudioFilter * filter, const GstAudioInfo * info)
603 {
604   GstWebrtcDsp *self = GST_WEBRTC_DSP (filter);
605   webrtc::AudioProcessing * apm;
606   webrtc::ProcessingConfig pconfig;
607   GstAudioInfo probe_info = *info;
608   gint err = 0;
609 
610   GST_LOG_OBJECT (self, "setting format to %s with %i Hz and %i channels",
611       info->finfo->description, info->rate, info->channels);
612 
613   GST_OBJECT_LOCK (self);
614 
615   gst_adapter_clear (self->adapter);
616   gst_planar_audio_adapter_clear (self->padapter);
617 
618   self->info = *info;
619   self->interleaved = (info->layout == GST_AUDIO_LAYOUT_INTERLEAVED);
620   apm = self->apm;
621 
622   if (!self->interleaved)
623     gst_planar_audio_adapter_configure (self->padapter, info);
624 
625   /* The WebRTC library works with 10ms buffers, compute this size once */
626   self->period_samples = info->rate / 100;
627   self->period_size = self->period_samples * info->bpf;
628 
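  /* The interleaved path copies each period into a webrtc::AudioFrame, whose
   * data_ array holds at most kMaxDataSizeSamples 16-bit samples, hence the
   * kMaxDataSizeSamples * 2 bytes limit below. For example, at 48000 Hz a
   * period is 480 samples per channel, i.e. 1920 bytes for stereo S16. */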
629   if (self->interleaved &&
630       (webrtc::AudioFrame::kMaxDataSizeSamples * 2) < self->period_size)
631     goto period_too_big;
632 
633   if (self->probe) {
634     GST_WEBRTC_ECHO_PROBE_LOCK (self->probe);
635 
636     if (self->probe->info.rate != 0) {
637       if (self->probe->info.rate != info->rate)
638         goto probe_has_wrong_rate;
639       probe_info = self->probe->info;
640     }
641 
642     GST_WEBRTC_ECHO_PROBE_UNLOCK (self->probe);
643   }
644 
645   /* input stream */
646   pconfig.streams[webrtc::ProcessingConfig::kInputStream] =
647       webrtc::StreamConfig (info->rate, info->channels, false);
648   /* output stream */
649   pconfig.streams[webrtc::ProcessingConfig::kOutputStream] =
650       webrtc::StreamConfig (info->rate, info->channels, false);
651   /* reverse input stream */
652   pconfig.streams[webrtc::ProcessingConfig::kReverseInputStream] =
653       webrtc::StreamConfig (probe_info.rate, probe_info.channels, false);
654   /* reverse output stream */
655   pconfig.streams[webrtc::ProcessingConfig::kReverseOutputStream] =
656       webrtc::StreamConfig (probe_info.rate, probe_info.channels, false);
657 
658   if ((err = apm->Initialize (pconfig)) < 0)
659     goto initialize_failed;
660 
661   /* Setup Filters */
662   if (self->high_pass_filter) {
663     GST_DEBUG_OBJECT (self, "Enabling High Pass filter");
664     apm->high_pass_filter ()->Enable (true);
665   }
666 
667   if (self->echo_cancel) {
668     GST_DEBUG_OBJECT (self, "Enabling Echo Cancellation");
669     apm->echo_cancellation ()->enable_drift_compensation (false);
670     apm->echo_cancellation ()
671         ->set_suppression_level (self->echo_suppression_level);
672     apm->echo_cancellation ()->Enable (true);
673   }
674 
675   if (self->noise_suppression) {
676     GST_DEBUG_OBJECT (self, "Enabling Noise Suppression");
677     apm->noise_suppression ()->set_level (self->noise_suppression_level);
678     apm->noise_suppression ()->Enable (true);
679   }
680 
681   if (self->gain_control) {
682     GEnumClass *mode_class = (GEnumClass *)
683         g_type_class_ref (GST_TYPE_WEBRTC_GAIN_CONTROL_MODE);
684 
685     GST_DEBUG_OBJECT (self, "Enabling Digital Gain Control, target level "
686         "dBFS %d, compression gain dB %d, limiter %senabled, mode: %s",
687         self->target_level_dbfs, self->compression_gain_db,
688         self->limiter ? "" : "NOT ",
689         g_enum_get_value (mode_class, self->gain_control_mode)->value_name);
690 
691     g_type_class_unref (mode_class);
692 
693     apm->gain_control ()->set_mode (self->gain_control_mode);
694     apm->gain_control ()->set_target_level_dbfs (self->target_level_dbfs);
695     apm->gain_control ()->set_compression_gain_db (self->compression_gain_db);
696     apm->gain_control ()->enable_limiter (self->limiter);
697     apm->gain_control ()->Enable (true);
698   }
699 
700   if (self->voice_detection) {
701     GEnumClass *likelihood_class = (GEnumClass *)
702         g_type_class_ref (GST_TYPE_WEBRTC_VOICE_DETECTION_LIKELIHOOD);
703     GST_DEBUG_OBJECT (self, "Enabling Voice Activity Detection, frame size "
704       "%d milliseconds, likelihood: %s", self->voice_detection_frame_size_ms,
705       g_enum_get_value (likelihood_class,
706           self->voice_detection_likelihood)->value_name);
707     g_type_class_unref (likelihood_class);
708 
709     self->stream_has_voice = FALSE;
710 
711     apm->voice_detection ()->Enable (true);
712     apm->voice_detection ()->set_likelihood (self->voice_detection_likelihood);
713     apm->voice_detection ()->set_frame_size_ms (
714         self->voice_detection_frame_size_ms);
715   }
716 
717   GST_OBJECT_UNLOCK (self);
718 
719   return TRUE;
720 
721 period_too_big:
722   GST_OBJECT_UNLOCK (self);
723   GST_WARNING_OBJECT (self, "webrtcdsp format produces a period that is too big "
724       "(maximum is %" G_GSIZE_FORMAT " samples and we have %u samples), "
725       "reduce the number of channels or the rate.",
726       webrtc::AudioFrame::kMaxDataSizeSamples, self->period_size / 2);
727   return FALSE;
728 
729 probe_has_wrong_rate:
730   GST_WEBRTC_ECHO_PROBE_UNLOCK (self->probe);
731   GST_OBJECT_UNLOCK (self);
732   GST_ELEMENT_ERROR (self, STREAM, FORMAT,
733       ("Echo Probe has rate %i, while the DSP is running at rate %i,"
734           " use a caps filter to ensure those are the same.",
735           probe_info.rate, info->rate), (NULL));
736   return FALSE;
737 
738 initialize_failed:
739   GST_OBJECT_UNLOCK (self);
740   GST_ELEMENT_ERROR (self, LIBRARY, INIT,
741       ("Failed to initialize WebRTC Audio Processing library"),
742       ("webrtc::AudioProcessing::Initialize() failed: %s",
743           webrtc_error_to_string (err)));
744   return FALSE;
745 }
746 
747 static gboolean
748 gst_webrtc_dsp_stop (GstBaseTransform * btrans)
749 {
750   GstWebrtcDsp *self = GST_WEBRTC_DSP (btrans);
751 
752   GST_OBJECT_LOCK (self);
753 
754   gst_adapter_clear (self->adapter);
755   gst_planar_audio_adapter_clear (self->padapter);
756 
757   if (self->probe) {
758     gst_webrtc_release_echo_probe (self->probe);
759     self->probe = NULL;
760   }
761 
762   delete self->apm;
763   self->apm = NULL;
764 
765   GST_OBJECT_UNLOCK (self);
766 
767   return TRUE;
768 }
769 
770 static void
771 gst_webrtc_dsp_set_property (GObject * object,
772     guint prop_id, const GValue * value, GParamSpec * pspec)
773 {
774   GstWebrtcDsp *self = GST_WEBRTC_DSP (object);
775 
776   GST_OBJECT_LOCK (self);
777   switch (prop_id) {
778     case PROP_PROBE:
779       g_free (self->probe_name);
780       self->probe_name = g_value_dup_string (value);
781       break;
782     case PROP_HIGH_PASS_FILTER:
783       self->high_pass_filter = g_value_get_boolean (value);
784       break;
785     case PROP_ECHO_CANCEL:
786       self->echo_cancel = g_value_get_boolean (value);
787       break;
788     case PROP_ECHO_SUPPRESSION_LEVEL:
789       self->echo_suppression_level =
790           (GstWebrtcEchoSuppressionLevel) g_value_get_enum (value);
791       break;
792     case PROP_NOISE_SUPPRESSION:
793       self->noise_suppression = g_value_get_boolean (value);
794       break;
795     case PROP_NOISE_SUPPRESSION_LEVEL:
796       self->noise_suppression_level =
797           (GstWebrtcNoiseSuppressionLevel) g_value_get_enum (value);
798       break;
799     case PROP_GAIN_CONTROL:
800       self->gain_control = g_value_get_boolean (value);
801       break;
802     case PROP_EXPERIMENTAL_AGC:
803       self->experimental_agc = g_value_get_boolean (value);
804       break;
805     case PROP_EXTENDED_FILTER:
806       self->extended_filter = g_value_get_boolean (value);
807       break;
808     case PROP_DELAY_AGNOSTIC:
809       self->delay_agnostic = g_value_get_boolean (value);
810       break;
811     case PROP_TARGET_LEVEL_DBFS:
812       self->target_level_dbfs = g_value_get_int (value);
813       break;
814     case PROP_COMPRESSION_GAIN_DB:
815       self->compression_gain_db = g_value_get_int (value);
816       break;
817     case PROP_STARTUP_MIN_VOLUME:
818       self->startup_min_volume = g_value_get_int (value);
819       break;
820     case PROP_LIMITER:
821       self->limiter = g_value_get_boolean (value);
822       break;
823     case PROP_GAIN_CONTROL_MODE:
824       self->gain_control_mode =
825           (GstWebrtcGainControlMode) g_value_get_enum (value);
826       break;
827     case PROP_VOICE_DETECTION:
828       self->voice_detection = g_value_get_boolean (value);
829       break;
830     case PROP_VOICE_DETECTION_FRAME_SIZE_MS:
831       self->voice_detection_frame_size_ms = g_value_get_int (value);
832       break;
833     case PROP_VOICE_DETECTION_LIKELIHOOD:
834       self->voice_detection_likelihood =
835           (GstWebrtcVoiceDetectionLikelihood) g_value_get_enum (value);
836       break;
837     default:
838       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
839       break;
840   }
841   GST_OBJECT_UNLOCK (self);
842 }
843 
844 static void
845 gst_webrtc_dsp_get_property (GObject * object,
846     guint prop_id, GValue * value, GParamSpec * pspec)
847 {
848   GstWebrtcDsp *self = GST_WEBRTC_DSP (object);
849 
850   GST_OBJECT_LOCK (self);
851   switch (prop_id) {
852     case PROP_PROBE:
853       g_value_set_string (value, self->probe_name);
854       break;
855     case PROP_HIGH_PASS_FILTER:
856       g_value_set_boolean (value, self->high_pass_filter);
857       break;
858     case PROP_ECHO_CANCEL:
859       g_value_set_boolean (value, self->echo_cancel);
860       break;
861     case PROP_ECHO_SUPPRESSION_LEVEL:
862       g_value_set_enum (value, self->echo_suppression_level);
863       break;
864     case PROP_NOISE_SUPPRESSION:
865       g_value_set_boolean (value, self->noise_suppression);
866       break;
867     case PROP_NOISE_SUPPRESSION_LEVEL:
868       g_value_set_enum (value, self->noise_suppression_level);
869       break;
870     case PROP_GAIN_CONTROL:
871       g_value_set_boolean (value, self->gain_control);
872       break;
873     case PROP_EXPERIMENTAL_AGC:
874       g_value_set_boolean (value, self->experimental_agc);
875       break;
876     case PROP_EXTENDED_FILTER:
877       g_value_set_boolean (value, self->extended_filter);
878       break;
879     case PROP_DELAY_AGNOSTIC:
880       g_value_set_boolean (value, self->delay_agnostic);
881       break;
882     case PROP_TARGET_LEVEL_DBFS:
883       g_value_set_int (value, self->target_level_dbfs);
884       break;
885     case PROP_COMPRESSION_GAIN_DB:
886       g_value_set_int (value, self->compression_gain_db);
887       break;
888     case PROP_STARTUP_MIN_VOLUME:
889       g_value_set_int (value, self->startup_min_volume);
890       break;
891     case PROP_LIMITER:
892       g_value_set_boolean (value, self->limiter);
893       break;
894     case PROP_GAIN_CONTROL_MODE:
895       g_value_set_enum (value, self->gain_control_mode);
896       break;
897     case PROP_VOICE_DETECTION:
898       g_value_set_boolean (value, self->voice_detection);
899       break;
900     case PROP_VOICE_DETECTION_FRAME_SIZE_MS:
901       g_value_set_int (value, self->voice_detection_frame_size_ms);
902       break;
903     case PROP_VOICE_DETECTION_LIKELIHOOD:
904       g_value_set_enum (value, self->voice_detection_likelihood);
905       break;
906     default:
907       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
908       break;
909   }
910   GST_OBJECT_UNLOCK (self);
911 }
912 
913 
914 static void
915 gst_webrtc_dsp_finalize (GObject * object)
916 {
917   GstWebrtcDsp *self = GST_WEBRTC_DSP (object);
918 
919   gst_object_unref (self->adapter);
920   gst_object_unref (self->padapter);
921   g_free (self->probe_name);
922 
923   G_OBJECT_CLASS (gst_webrtc_dsp_parent_class)->finalize (object);
924 }
925 
926 static void
927 gst_webrtc_dsp_init (GstWebrtcDsp * self)
928 {
929   self->adapter = gst_adapter_new ();
930   self->padapter = gst_planar_audio_adapter_new ();
931   gst_audio_info_init (&self->info);
932 }
933 
934 static void
935 gst_webrtc_dsp_class_init (GstWebrtcDspClass * klass)
936 {
937   GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
938   GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
939   GstBaseTransformClass *btrans_class = GST_BASE_TRANSFORM_CLASS (klass);
940   GstAudioFilterClass *audiofilter_class = GST_AUDIO_FILTER_CLASS (klass);
941 
942   gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_finalize);
943   gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_set_property);
944   gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_get_property);
945 
946   btrans_class->passthrough_on_same_caps = FALSE;
947   btrans_class->start = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_start);
948   btrans_class->stop = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_stop);
949   btrans_class->submit_input_buffer =
950       GST_DEBUG_FUNCPTR (gst_webrtc_dsp_submit_input_buffer);
951   btrans_class->generate_output =
952       GST_DEBUG_FUNCPTR (gst_webrtc_dsp_generate_output);
953 
954   audiofilter_class->setup = GST_DEBUG_FUNCPTR (gst_webrtc_dsp_setup);
955 
956   gst_element_class_add_static_pad_template (element_class,
957       &gst_webrtc_dsp_src_template);
958   gst_element_class_add_static_pad_template (element_class,
959       &gst_webrtc_dsp_sink_template);
960   gst_element_class_set_static_metadata (element_class,
961       "Voice Processor (AGC, AEC, filters, etc.)",
962       "Generic/Audio",
963       "Pre-processes voice with WebRTC Audio Processing Library",
964       "Nicolas Dufresne <nicolas.dufresne@collabora.com>");
965 
966   g_object_class_install_property (gobject_class,
967       PROP_PROBE,
968       g_param_spec_string ("probe", "Echo Probe",
969           "The name of the webrtcechoprobe element that records the audio being "
970           "played through the loudspeakers. Must be set before PAUSED state.",
971           "webrtcechoprobe0",
972           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
973               G_PARAM_CONSTRUCT)));
974 
975   g_object_class_install_property (gobject_class,
976       PROP_HIGH_PASS_FILTER,
977       g_param_spec_boolean ("high-pass-filter", "High Pass Filter",
978           "Enable or disable high pass filtering", TRUE,
979           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
980               G_PARAM_CONSTRUCT)));
981 
982   g_object_class_install_property (gobject_class,
983       PROP_ECHO_CANCEL,
984       g_param_spec_boolean ("echo-cancel", "Echo Cancel",
985           "Enable or disable echo canceller", TRUE,
986           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
987               G_PARAM_CONSTRUCT)));
988 
989   g_object_class_install_property (gobject_class,
990       PROP_ECHO_SUPPRESSION_LEVEL,
991       g_param_spec_enum ("echo-suppression-level", "Echo Suppression Level",
992           "Controls the aggressiveness of the suppressor. A higher level "
993           "trades off double-talk performance for increased echo suppression.",
994           GST_TYPE_WEBRTC_ECHO_SUPPRESSION_LEVEL,
995           webrtc::EchoCancellation::kModerateSuppression,
996           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
997               G_PARAM_CONSTRUCT)));
998 
999   g_object_class_install_property (gobject_class,
1000       PROP_NOISE_SUPPRESSION,
1001       g_param_spec_boolean ("noise-suppression", "Noise Suppression",
1002           "Enable or disable noise suppression", TRUE,
1003           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1004               G_PARAM_CONSTRUCT)));
1005 
1006   g_object_class_install_property (gobject_class,
1007       PROP_NOISE_SUPPRESSION_LEVEL,
1008       g_param_spec_enum ("noise-suppression-level", "Noise Suppression Level",
1009           "Controls the aggressiveness of the suppression. Increasing the "
1010           "level will reduce the noise level at the expense of a higher "
1011           "speech distortion.", GST_TYPE_WEBRTC_NOISE_SUPPRESSION_LEVEL,
1012           webrtc::NoiseSuppression::kModerate,
1013           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1014               G_PARAM_CONSTRUCT)));
1015 
1016   g_object_class_install_property (gobject_class,
1017       PROP_GAIN_CONTROL,
1018       g_param_spec_boolean ("gain-control", "Gain Control",
1019           "Enable or disable automatic digital gain control",
1020           TRUE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1021               G_PARAM_CONSTRUCT)));
1022 
1023   g_object_class_install_property (gobject_class,
1024       PROP_EXPERIMENTAL_AGC,
1025       g_param_spec_boolean ("experimental-agc", "Experimental AGC",
1026           "Enable or disable experimental automatic gain control.",
1027           FALSE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1028               G_PARAM_CONSTRUCT)));
1029 
1030   g_object_class_install_property (gobject_class,
1031       PROP_EXTENDED_FILTER,
1032       g_param_spec_boolean ("extended-filter", "Extended Filter",
1033           "Enable or disable the extended filter.",
1034           TRUE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1035               G_PARAM_CONSTRUCT)));
1036 
1037   g_object_class_install_property (gobject_class,
1038       PROP_DELAY_AGNOSTIC,
1039       g_param_spec_boolean ("delay-agnostic", "Delay Agnostic",
1040           "Enable or disable the delay agnostic mode.",
1041           FALSE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1042               G_PARAM_CONSTRUCT)));
1043 
1044   g_object_class_install_property (gobject_class,
1045       PROP_TARGET_LEVEL_DBFS,
1046       g_param_spec_int ("target-level-dbfs", "Target Level dBFS",
1047           "Sets the target peak |level| (or envelope) of the gain control in "
1048           "dBFS (decibels from digital full-scale).",
1049           0, 31, DEFAULT_TARGET_LEVEL_DBFS, (GParamFlags) (G_PARAM_READWRITE |
1050               G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
1051 
1052   g_object_class_install_property (gobject_class,
1053       PROP_COMPRESSION_GAIN_DB,
1054       g_param_spec_int ("compression-gain-db", "Compression Gain dB",
1055           "Sets the maximum |gain| the digital compression stage may apply, "
1056           "in dB.",
1057           0, 90, DEFAULT_COMPRESSION_GAIN_DB, (GParamFlags) (G_PARAM_READWRITE |
1058               G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
1059 
1060   g_object_class_install_property (gobject_class,
1061       PROP_STARTUP_MIN_VOLUME,
1062       g_param_spec_int ("startup-min-volume", "Startup Minimum Volume",
1063           "At startup the experimental AGC moves the microphone volume up to "
1064           "|startup_min_volume| if the current microphone volume is set too "
1065           "low. No effect if experimental-agc isn't enabled.",
1066           12, 255, DEFAULT_STARTUP_MIN_VOLUME, (GParamFlags) (G_PARAM_READWRITE |
1067               G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
1068 
1069   g_object_class_install_property (gobject_class,
1070       PROP_LIMITER,
1071       g_param_spec_boolean ("limiter", "Limiter",
1072           "When enabled, the compression stage will hard limit the signal to "
1073           "the target level. Otherwise, the signal will be compressed but not "
1074           "limited above the target level.",
1075           DEFAULT_LIMITER, (GParamFlags) (G_PARAM_READWRITE |
1076               G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
1077 
1078   g_object_class_install_property (gobject_class,
1079       PROP_GAIN_CONTROL_MODE,
1080       g_param_spec_enum ("gain-control-mode", "Gain Control Mode",
1081           "Controls the mode of the compression stage",
1082           GST_TYPE_WEBRTC_GAIN_CONTROL_MODE,
1083           DEFAULT_GAIN_CONTROL_MODE,
1084           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1085               G_PARAM_CONSTRUCT)));
1086 
1087   g_object_class_install_property (gobject_class,
1088       PROP_VOICE_DETECTION,
1089       g_param_spec_boolean ("voice-detection", "Voice Detection",
1090           "Enable or disable the voice activity detector",
1091           DEFAULT_VOICE_DETECTION, (GParamFlags) (G_PARAM_READWRITE |
1092               G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
1093 
1094   g_object_class_install_property (gobject_class,
1095       PROP_VOICE_DETECTION_FRAME_SIZE_MS,
1096       g_param_spec_int ("voice-detection-frame-size-ms",
1097           "Voice Detection Frame Size Milliseconds",
1098           "Sets the |size| of the frames in ms on which the VAD will operate. "
1099           "Larger frames will improve detection accuracy, but reduce the "
1100           "frequency of updates",
1101           10, 30, DEFAULT_VOICE_DETECTION_FRAME_SIZE_MS,
1102           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1103               G_PARAM_CONSTRUCT)));
1104 
1105   g_object_class_install_property (gobject_class,
1106       PROP_VOICE_DETECTION_LIKELIHOOD,
1107       g_param_spec_enum ("voice-detection-likelihood",
1108           "Voice Detection Likelihood",
1109           "Specifies the likelihood that a frame will be declared to contain "
1110           "voice.",
1111           GST_TYPE_WEBRTC_VOICE_DETECTION_LIKELIHOOD,
1112           DEFAULT_VOICE_DETECTION_LIKELIHOOD,
1113           (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1114               G_PARAM_CONSTRUCT)));
1115 
1116 }
1117 
1118 static gboolean
1119 plugin_init (GstPlugin * plugin)
1120 {
1121   GST_DEBUG_CATEGORY_INIT
1122       (webrtc_dsp_debug, "webrtcdsp", 0, "libwebrtcdsp wrapping elements");
1123 
1124   if (!gst_element_register (plugin, "webrtcdsp", GST_RANK_NONE,
1125           GST_TYPE_WEBRTC_DSP)) {
1126     return FALSE;
1127   }
1128   if (!gst_element_register (plugin, "webrtcechoprobe", GST_RANK_NONE,
1129           GST_TYPE_WEBRTC_ECHO_PROBE)) {
1130     return FALSE;
1131   }
1132 
1133   return TRUE;
1134 }
1135 
1136 GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
1137     GST_VERSION_MINOR,
1138     webrtcdsp,
1139     "Voice pre-processing using WebRTC Audio Processing Library",
1140     plugin_init, VERSION, "LGPL", GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)
1141