1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <assert.h>
12 
13 #include "webrtc/modules/audio_device/audio_device_config.h"
14 #include "webrtc/modules/audio_device/audio_device_utility.h"
15 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
16 
17 #include "webrtc/system_wrappers/interface/event_wrapper.h"
18 #include "webrtc/system_wrappers/interface/trace.h"
19 
// Process-wide symbol table used to late-bind the PulseAudio API at runtime
// (loaded via dlopen instead of linking against libpulse directly).
webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;

// Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our binary
// will work on systems that don't have it.
#define LATE(sym) \
  LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
27 
28 namespace webrtc
29 {
30 
31 // ============================================================================
32 //                              Static Methods
33 // ============================================================================
34 
// Constructor: allocates the synchronization primitives (critical section and
// the four events used to pace/hand-shake the rec and play threads) and zeroes
// all state. No PulseAudio work happens here; that is deferred to Init().
// NOTE: the member-initializer order below must match the declaration order in
// the header, so do not reorder entries.
AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
    _ptrAudioBuffer(NULL),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _timeEventRec(*EventWrapper::Create()),
    _timeEventPlay(*EventWrapper::Create()),
    _recStartEvent(*EventWrapper::Create()),
    _playStartEvent(*EventWrapper::Create()),
    _id(id),
    _mixerManager(id),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    sample_rate_hz_(0),
    _recChannels(1),
    _playChannels(1),
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _startRec(false),
    _stopRec(false),
    _startPlay(false),
    _stopPlay(false),
    _AGC(false),
    update_speaker_volume_at_startup_(false),
    _playBufDelayFixed(20),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writeErrors(0),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _deviceIndex(-1),
    _numPlayDevices(0),
    _numRecDevices(0),
    _playDeviceName(NULL),
    _recDeviceName(NULL),
    _playDisplayDeviceName(NULL),
    _recDisplayDeviceName(NULL),
    _playBuffer(NULL),
    _playbackBufferSize(0),
    _playbackBufferUnused(0),
    _tempBufferSpace(0),
    _recBuffer(NULL),
    _recordBufferSize(0),
    _recordBufferUsed(0),
    _tempSampleData(NULL),
    _tempSampleDataSize(0),
    _configuredLatencyPlay(0),
    _configuredLatencyRec(0),
    _paDeviceIndex(-1),
    _paStateChanged(false),
    _paMainloop(NULL),
    _paMainloopApi(NULL),
    _paContext(NULL),
    _recStream(NULL),
    _playStream(NULL),
    _recStreamFlags(0),
    _playStreamFlags(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    // Zero the PA server version string, both stream buffer attribute
    // structs, and the keyboard-state snapshot used for typing detection.
    memset(_paServerVersion, 0, sizeof(_paServerVersion));
    memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
    memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
    memset(_oldKeyState, 0, sizeof(_oldKeyState));
}
107 
~AudioDeviceLinuxPulse()108 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
109 {
110     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
111                  "%s destroyed", __FUNCTION__);
112 
113     Terminate();
114 
115     if (_recBuffer)
116     {
117         delete [] _recBuffer;
118         _recBuffer = NULL;
119     }
120     if (_playBuffer)
121     {
122         delete [] _playBuffer;
123         _playBuffer = NULL;
124     }
125     if (_playDeviceName)
126     {
127         delete [] _playDeviceName;
128         _playDeviceName = NULL;
129     }
130     if (_recDeviceName)
131     {
132         delete [] _recDeviceName;
133         _recDeviceName = NULL;
134     }
135 
136     delete &_recStartEvent;
137     delete &_playStartEvent;
138     delete &_timeEventRec;
139     delete &_timeEventPlay;
140     delete &_critSect;
141 }
142 
AttachAudioBuffer(AudioDeviceBuffer * audioBuffer)143 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
144 {
145 
146     CriticalSectionScoped lock(&_critSect);
147 
148     _ptrAudioBuffer = audioBuffer;
149 
150     // Inform the AudioBuffer about default settings for this implementation.
151     // Set all values to zero here since the actual settings will be done by
152     // InitPlayout and InitRecording later.
153     _ptrAudioBuffer->SetRecordingSampleRate(0);
154     _ptrAudioBuffer->SetPlayoutSampleRate(0);
155     _ptrAudioBuffer->SetRecordingChannels(0);
156     _ptrAudioBuffer->SetPlayoutChannels(0);
157 }
158 
159 // ----------------------------------------------------------------------------
160 //  ActiveAudioLayer
161 // ----------------------------------------------------------------------------
162 
ActiveAudioLayer(AudioDeviceModule::AudioLayer & audioLayer) const163 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
164     AudioDeviceModule::AudioLayer& audioLayer) const
165 {
166     audioLayer = AudioDeviceModule::kLinuxPulseAudio;
167     return 0;
168 }
169 
Init()170 int32_t AudioDeviceLinuxPulse::Init()
171 {
172 
173     CriticalSectionScoped lock(&_critSect);
174 
175     if (_initialized)
176     {
177         return 0;
178     }
179 
180     // Initialize PulseAudio
181     if (InitPulseAudio() < 0)
182     {
183         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
184                      "  failed to initialize PulseAudio");
185 
186         if (TerminatePulseAudio() < 0)
187         {
188             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
189                          "  failed to terminate PulseAudio");
190         }
191 
192         return -1;
193     }
194 
195     _playWarning = 0;
196     _playError = 0;
197     _recWarning = 0;
198     _recError = 0;
199 
200 #ifdef USE_X11
201     //Get X display handle for typing detection
202     _XDisplay = XOpenDisplay(NULL);
203     if (!_XDisplay)
204     {
205         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
206           "  failed to open X display, typing detection will not work");
207     }
208 #endif
209 
210     // RECORDING
211     const char* threadName = "webrtc_audio_module_rec_thread";
212     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
213                                                 threadName);
214     if (!_ptrThreadRec->Start())
215     {
216         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
217                      "  failed to start the rec audio thread");
218 
219         _ptrThreadRec.reset();
220         return -1;
221     }
222 
223     _ptrThreadRec->SetPriority(kRealtimePriority);
224 
225     // PLAYOUT
226     threadName = "webrtc_audio_module_play_thread";
227     _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
228                                                  threadName);
229     if (!_ptrThreadPlay->Start())
230     {
231         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
232                      "  failed to start the play audio thread");
233 
234         _ptrThreadPlay.reset();
235         return -1;
236     }
237     _ptrThreadPlay->SetPriority(kRealtimePriority);
238 
239     _initialized = true;
240 
241     return 0;
242 }
243 
// Shuts the module down: closes the mixer, stops and deletes both worker
// threads, tears down the PulseAudio connection, and closes the X display.
// The Lock/UnLock dance below is deliberate: the lock must NOT be held while
// Stop() blocks on a thread, since the thread itself takes the same lock.
// Returns 0 on success, -1 if PulseAudio termination fails.
int32_t AudioDeviceLinuxPulse::Terminate()
{

    if (!_initialized)
    {
        return 0;
    }

    Lock();

    _mixerManager.Close();

    // RECORDING
    if (_ptrThreadRec)
    {
        ThreadWrapper* tmpThread = _ptrThreadRec.release();
        // Drop the lock so the rec thread can make progress and exit.
        UnLock();

        // Wake the thread so Stop() does not wait for its timer to expire.
        _timeEventRec.Set();
        tmpThread->Stop();
        delete tmpThread;
        // Lock again since we need to protect _ptrThreadPlay.
        Lock();
    }

    // PLAYOUT
    if (_ptrThreadPlay)
    {
        ThreadWrapper* tmpThread = _ptrThreadPlay.release();
        // Leave the critical section before blocking on the play thread.
        _critSect.Leave();

        _timeEventPlay.Set();
        tmpThread->Stop();
        delete tmpThread;
    } else {
      UnLock();
    }

    // Terminate PulseAudio
    if (TerminatePulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to terminate PulseAudio");
        return -1;
    }

#ifdef USE_X11
    // Close the X display handle opened in Init() for typing detection.
    if (_XDisplay)
    {
      XCloseDisplay(_XDisplay);
      _XDisplay = NULL;
    }
#endif

    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return 0;
}
304 
Initialized() const305 bool AudioDeviceLinuxPulse::Initialized() const
306 {
307     return (_initialized);
308 }
309 
InitSpeaker()310 int32_t AudioDeviceLinuxPulse::InitSpeaker()
311 {
312 
313     CriticalSectionScoped lock(&_critSect);
314 
315     if (_playing)
316     {
317         return -1;
318     }
319 
320     if (!_outputDeviceIsSpecified)
321     {
322         return -1;
323     }
324 
325     // check if default device
326     if (_outputDeviceIndex == 0)
327     {
328         uint16_t deviceIndex = 0;
329         GetDefaultDeviceInfo(false, NULL, deviceIndex);
330         _paDeviceIndex = deviceIndex;
331     } else
332     {
333         // get the PA device index from
334         // the callback
335         _deviceIndex = _outputDeviceIndex;
336 
337         // get playout devices
338         PlayoutDevices();
339     }
340 
341     // the callback has now set the _paDeviceIndex to
342     // the PulseAudio index of the device
343     if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
344     {
345         return -1;
346     }
347 
348     // clear _deviceIndex
349     _deviceIndex = -1;
350     _paDeviceIndex = -1;
351 
352     return 0;
353 }
354 
InitMicrophone()355 int32_t AudioDeviceLinuxPulse::InitMicrophone()
356 {
357 
358     CriticalSectionScoped lock(&_critSect);
359 
360     if (_recording)
361     {
362         return -1;
363     }
364 
365     if (!_inputDeviceIsSpecified)
366     {
367         return -1;
368     }
369 
370     // Check if default device
371     if (_inputDeviceIndex == 0)
372     {
373         uint16_t deviceIndex = 0;
374         GetDefaultDeviceInfo(true, NULL, deviceIndex);
375         _paDeviceIndex = deviceIndex;
376     } else
377     {
378         // Get the PA device index from
379         // the callback
380         _deviceIndex = _inputDeviceIndex;
381 
382         // get recording devices
383         RecordingDevices();
384     }
385 
386     // The callback has now set the _paDeviceIndex to
387     // the PulseAudio index of the device
388     if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
389     {
390         return -1;
391     }
392 
393     // Clear _deviceIndex
394     _deviceIndex = -1;
395     _paDeviceIndex = -1;
396 
397     return 0;
398 }
399 
SpeakerIsInitialized() const400 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
401 {
402     return (_mixerManager.SpeakerIsInitialized());
403 }
404 
MicrophoneIsInitialized() const405 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
406 {
407     return (_mixerManager.MicrophoneIsInitialized());
408 }
409 
SpeakerVolumeIsAvailable(bool & available)410 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
411 {
412 
413     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
414 
415     // Make an attempt to open up the
416     // output mixer corresponding to the currently selected output device.
417     if (!wasInitialized && InitSpeaker() == -1)
418     {
419         // If we end up here it means that the selected speaker has no volume
420         // control.
421         available = false;
422         return 0;
423     }
424 
425     // Given that InitSpeaker was successful, we know that a volume control exists
426     available = true;
427 
428     // Close the initialized output mixer
429     if (!wasInitialized)
430     {
431         _mixerManager.CloseSpeaker();
432     }
433 
434     return 0;
435 }
436 
SetSpeakerVolume(uint32_t volume)437 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
438 {
439     if (!_playing) {
440       // Only update the volume if it's been set while we weren't playing.
441       update_speaker_volume_at_startup_ = true;
442     }
443     return (_mixerManager.SetSpeakerVolume(volume));
444 }
445 
SpeakerVolume(uint32_t & volume) const446 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
447 {
448 
449     uint32_t level(0);
450 
451     if (_mixerManager.SpeakerVolume(level) == -1)
452     {
453         return -1;
454     }
455 
456     volume = level;
457 
458     return 0;
459 }
460 
SetWaveOutVolume(uint16_t volumeLeft,uint16_t volumeRight)461 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
462     uint16_t volumeLeft,
463     uint16_t volumeRight)
464 {
465 
466     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
467                  "  API call not supported on this platform");
468     return -1;
469 }
470 
WaveOutVolume(uint16_t &,uint16_t &) const471 int32_t AudioDeviceLinuxPulse::WaveOutVolume(
472     uint16_t& /*volumeLeft*/,
473     uint16_t& /*volumeRight*/) const
474 {
475 
476     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
477                  "  API call not supported on this platform");
478     return -1;
479 }
480 
MaxSpeakerVolume(uint32_t & maxVolume) const481 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
482     uint32_t& maxVolume) const
483 {
484 
485     uint32_t maxVol(0);
486 
487     if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
488     {
489         return -1;
490     }
491 
492     maxVolume = maxVol;
493 
494     return 0;
495 }
496 
MinSpeakerVolume(uint32_t & minVolume) const497 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
498     uint32_t& minVolume) const
499 {
500 
501     uint32_t minVol(0);
502 
503     if (_mixerManager.MinSpeakerVolume(minVol) == -1)
504     {
505         return -1;
506     }
507 
508     minVolume = minVol;
509 
510     return 0;
511 }
512 
SpeakerVolumeStepSize(uint16_t & stepSize) const513 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
514     uint16_t& stepSize) const
515 {
516 
517     uint16_t delta(0);
518 
519     if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
520     {
521         return -1;
522     }
523 
524     stepSize = delta;
525 
526     return 0;
527 }
528 
SpeakerMuteIsAvailable(bool & available)529 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
530 {
531 
532     bool isAvailable(false);
533     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
534 
535     // Make an attempt to open up the
536     // output mixer corresponding to the currently selected output device.
537     //
538     if (!wasInitialized && InitSpeaker() == -1)
539     {
540         // If we end up here it means that the selected speaker has no volume
541         // control, hence it is safe to state that there is no mute control
542         // already at this stage.
543         available = false;
544         return 0;
545     }
546 
547     // Check if the selected speaker has a mute control
548     _mixerManager.SpeakerMuteIsAvailable(isAvailable);
549 
550     available = isAvailable;
551 
552     // Close the initialized output mixer
553     if (!wasInitialized)
554     {
555         _mixerManager.CloseSpeaker();
556     }
557 
558     return 0;
559 }
560 
SetSpeakerMute(bool enable)561 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
562 {
563 
564     return (_mixerManager.SetSpeakerMute(enable));
565 }
566 
SpeakerMute(bool & enabled) const567 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
568 {
569 
570     bool muted(0);
571     if (_mixerManager.SpeakerMute(muted) == -1)
572     {
573         return -1;
574     }
575 
576     enabled = muted;
577     return 0;
578 }
579 
MicrophoneMuteIsAvailable(bool & available)580 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
581 {
582 
583     bool isAvailable(false);
584     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
585 
586     // Make an attempt to open up the
587     // input mixer corresponding to the currently selected input device.
588     //
589     if (!wasInitialized && InitMicrophone() == -1)
590     {
591         // If we end up here it means that the selected microphone has no volume
592         // control, hence it is safe to state that there is no boost control
593         // already at this stage.
594         available = false;
595         return 0;
596     }
597 
598     // Check if the selected microphone has a mute control
599     //
600     _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
601     available = isAvailable;
602 
603     // Close the initialized input mixer
604     //
605     if (!wasInitialized)
606     {
607         _mixerManager.CloseMicrophone();
608     }
609 
610     return 0;
611 }
612 
SetMicrophoneMute(bool enable)613 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
614 {
615 
616     return (_mixerManager.SetMicrophoneMute(enable));
617 }
618 
MicrophoneMute(bool & enabled) const619 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
620 {
621 
622     bool muted(0);
623     if (_mixerManager.MicrophoneMute(muted) == -1)
624     {
625         return -1;
626     }
627 
628     enabled = muted;
629     return 0;
630 }
631 
MicrophoneBoostIsAvailable(bool & available)632 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
633 {
634 
635     bool isAvailable(false);
636     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
637 
638     // Enumerate all avaliable microphone and make an attempt to open up the
639     // input mixer corresponding to the currently selected input device.
640     //
641     if (!wasInitialized && InitMicrophone() == -1)
642     {
643         // If we end up here it means that the selected microphone has no volume
644         // control, hence it is safe to state that there is no boost control
645         // already at this stage.
646         available = false;
647         return 0;
648     }
649 
650     // Check if the selected microphone has a boost control
651     _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
652     available = isAvailable;
653 
654     // Close the initialized input mixer
655     if (!wasInitialized)
656     {
657         _mixerManager.CloseMicrophone();
658     }
659 
660     return 0;
661 }
662 
SetMicrophoneBoost(bool enable)663 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
664 {
665 
666     return (_mixerManager.SetMicrophoneBoost(enable));
667 }
668 
MicrophoneBoost(bool & enabled) const669 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
670 {
671 
672     bool onOff(0);
673 
674     if (_mixerManager.MicrophoneBoost(onOff) == -1)
675     {
676         return -1;
677     }
678 
679     enabled = onOff;
680 
681     return 0;
682 }
683 
StereoRecordingIsAvailable(bool & available)684 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
685 {
686 
687     if (_recChannels == 2 && _recording) {
688       available = true;
689       return 0;
690     }
691 
692     available = false;
693     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
694     int error = 0;
695 
696     if (!wasInitialized && InitMicrophone() == -1)
697     {
698         // Cannot open the specified device
699         available = false;
700         return 0;
701     }
702 
703     // Check if the selected microphone can record stereo.
704     bool isAvailable(false);
705     error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
706     if (!error)
707       available = isAvailable;
708 
709     // Close the initialized input mixer
710     if (!wasInitialized)
711     {
712         _mixerManager.CloseMicrophone();
713     }
714 
715     return error;
716 }
717 
SetStereoRecording(bool enable)718 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
719 {
720 
721     if (enable)
722         _recChannels = 2;
723     else
724         _recChannels = 1;
725 
726     return 0;
727 }
728 
StereoRecording(bool & enabled) const729 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
730 {
731 
732     if (_recChannels == 2)
733         enabled = true;
734     else
735         enabled = false;
736 
737     return 0;
738 }
739 
StereoPlayoutIsAvailable(bool & available)740 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
741 {
742 
743     if (_playChannels == 2 && _playing) {
744       available = true;
745       return 0;
746     }
747 
748     available = false;
749     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
750     int error = 0;
751 
752     if (!wasInitialized && InitSpeaker() == -1)
753     {
754         // Cannot open the specified device.
755         return -1;
756     }
757 
758     // Check if the selected speaker can play stereo.
759     bool isAvailable(false);
760     error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
761     if (!error)
762       available = isAvailable;
763 
764     // Close the initialized input mixer
765     if (!wasInitialized)
766     {
767         _mixerManager.CloseSpeaker();
768     }
769 
770     return error;
771 }
772 
SetStereoPlayout(bool enable)773 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
774 {
775 
776     if (enable)
777         _playChannels = 2;
778     else
779         _playChannels = 1;
780 
781     return 0;
782 }
783 
StereoPlayout(bool & enabled) const784 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
785 {
786 
787     if (_playChannels == 2)
788         enabled = true;
789     else
790         enabled = false;
791 
792     return 0;
793 }
794 
SetAGC(bool enable)795 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
796 {
797 
798     _AGC = enable;
799 
800     return 0;
801 }
802 
AGC() const803 bool AudioDeviceLinuxPulse::AGC() const
804 {
805 
806     return _AGC;
807 }
808 
MicrophoneVolumeIsAvailable(bool & available)809 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
810     bool& available)
811 {
812 
813     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
814 
815     // Make an attempt to open up the
816     // input mixer corresponding to the currently selected output device.
817     if (!wasInitialized && InitMicrophone() == -1)
818     {
819         // If we end up here it means that the selected microphone has no volume
820         // control.
821         available = false;
822         return 0;
823     }
824 
825     // Given that InitMicrophone was successful, we know that a volume control
826     // exists
827     available = true;
828 
829     // Close the initialized input mixer
830     if (!wasInitialized)
831     {
832         _mixerManager.CloseMicrophone();
833     }
834 
835     return 0;
836 }
837 
SetMicrophoneVolume(uint32_t volume)838 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
839 {
840 
841     return (_mixerManager.SetMicrophoneVolume(volume));
842 }
843 
MicrophoneVolume(uint32_t & volume) const844 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
845     uint32_t& volume) const
846 {
847 
848     uint32_t level(0);
849 
850     if (_mixerManager.MicrophoneVolume(level) == -1)
851     {
852         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
853                      "  failed to retrive current microphone level");
854         return -1;
855     }
856 
857     volume = level;
858 
859     return 0;
860 }
861 
MaxMicrophoneVolume(uint32_t & maxVolume) const862 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
863     uint32_t& maxVolume) const
864 {
865 
866     uint32_t maxVol(0);
867 
868     if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
869     {
870         return -1;
871     }
872 
873     maxVolume = maxVol;
874 
875     return 0;
876 }
877 
MinMicrophoneVolume(uint32_t & minVolume) const878 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
879     uint32_t& minVolume) const
880 {
881 
882     uint32_t minVol(0);
883 
884     if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
885     {
886         return -1;
887     }
888 
889     minVolume = minVol;
890 
891     return 0;
892 }
893 
MicrophoneVolumeStepSize(uint16_t & stepSize) const894 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
895     uint16_t& stepSize) const
896 {
897 
898     uint16_t delta(0);
899 
900     if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
901     {
902         return -1;
903     }
904 
905     stepSize = delta;
906 
907     return 0;
908 }
909 
// Counts the available playout (sink) devices. Enumerates all sinks via the
// asynchronous PulseAudio API; PaSinkInfoCallback increments _numPlayDevices
// for each sink (and handles any pending name/index lookup requested through
// _deviceIndex). Must be called with the PA mainloop lock held only via
// PaLock()/PaUnLock() as done here.
int16_t AudioDeviceLinuxPulse::PlayoutDevices()
{

    PaLock();

    pa_operation* paOperation = NULL;
    _numPlayDevices = 1; // init to 1 to account for "default"

    // get the whole list of devices and update _numPlayDevices
    paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
                                                      PaSinkInfoCallback,
                                                      this);

    // Block until the async enumeration has delivered all entries.
    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numPlayDevices;
}
929 
SetPlayoutDevice(uint16_t index)930 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
931 {
932 
933     if (_playIsInitialized)
934     {
935         return -1;
936     }
937 
938     const uint16_t nDevices = PlayoutDevices();
939 
940     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
941                  "  number of availiable output devices is %u", nDevices);
942 
943     if (index > (nDevices - 1))
944     {
945         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
946                      "  device index is out of range [0,%u]", (nDevices - 1));
947         return -1;
948     }
949 
950     _outputDeviceIndex = index;
951     _outputDeviceIsSpecified = true;
952 
953     return 0;
954 }
955 
SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType)956 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
957     AudioDeviceModule::WindowsDeviceType /*device*/)
958 {
959     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
960                  "WindowsDeviceType not supported");
961     return -1;
962 }
963 
// Copies the display name (and clears the GUID, which PulseAudio does not
// provide) of playout device |index| into the caller's buffers. Index 0 is
// the default sink, resolved directly; other indices are resolved by setting
// _playDisplayDeviceName/_deviceIndex and re-running the sink enumeration so
// PaSinkInfoCallback fills in the name when it reaches the requested index.
int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    const uint16_t nDevices = PlayoutDevices();

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(false, name, deviceIndex);
    }

    // Tell the callback that we want
    // The name for this device
    _playDisplayDeviceName = name;
    _deviceIndex = index;

    // get playout devices
    PlayoutDevices();

    // clear device name and index
    _playDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}
1005 
// Copies the display name (and clears the GUID, which PulseAudio does not
// provide) of recording device |index| into the caller's buffers. Index 0 is
// the default source, resolved directly; other indices are resolved by
// setting _recDisplayDeviceName/_deviceIndex and re-running the source
// enumeration so PaSourceInfoCallback fills in the name at that index.
int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    const uint16_t nDevices(RecordingDevices());

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(true, name, deviceIndex);
    }

    // Tell the callback that we want
    // the name for this device
    _recDisplayDeviceName = name;
    _deviceIndex = index;

    // Get recording devices
    RecordingDevices();

    // Clear device name and index
    _recDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}
1047 
// Counts the available recording (source) devices. Enumerates all sources via
// the asynchronous PulseAudio API; PaSourceInfoCallback increments
// _numRecDevices for each source (and handles any pending name/index lookup
// requested through _deviceIndex). PA mainloop locking is handled by
// PaLock()/PaUnLock().
int16_t AudioDeviceLinuxPulse::RecordingDevices()
{

    PaLock();

    pa_operation* paOperation = NULL;
    _numRecDevices = 1; // Init to 1 to account for "default"

    // Get the whole list of devices and update _numRecDevices
    paOperation = LATE(pa_context_get_source_info_list)(_paContext,
                                                        PaSourceInfoCallback,
                                                        this);

    // Block until the async enumeration has delivered all entries.
    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numRecDevices;
}
1067 
SetRecordingDevice(uint16_t index)1068 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
1069 {
1070 
1071     if (_recIsInitialized)
1072     {
1073         return -1;
1074     }
1075 
1076     const uint16_t nDevices(RecordingDevices());
1077 
1078     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1079                  "  number of availiable input devices is %u", nDevices);
1080 
1081     if (index > (nDevices - 1))
1082     {
1083         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1084                      "  device index is out of range [0,%u]", (nDevices - 1));
1085         return -1;
1086     }
1087 
1088     _inputDeviceIndex = index;
1089     _inputDeviceIsSpecified = true;
1090 
1091     return 0;
1092 }
1093 
SetRecordingDevice(AudioDeviceModule::WindowsDeviceType)1094 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
1095     AudioDeviceModule::WindowsDeviceType /*device*/)
1096 {
1097     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1098                  "WindowsDeviceType not supported");
1099     return -1;
1100 }
1101 
PlayoutIsAvailable(bool & available)1102 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
1103 {
1104 
1105     available = false;
1106 
1107     // Try to initialize the playout side
1108     int32_t res = InitPlayout();
1109 
1110     // Cancel effect of initialization
1111     StopPlayout();
1112 
1113     if (res != -1)
1114     {
1115         available = true;
1116     }
1117 
1118     return res;
1119 }
1120 
RecordingIsAvailable(bool & available)1121 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
1122 {
1123 
1124     available = false;
1125 
1126     // Try to initialize the playout side
1127     int32_t res = InitRecording();
1128 
1129     // Cancel effect of initialization
1130     StopRecording();
1131 
1132     if (res != -1)
1133     {
1134         available = true;
1135     }
1136 
1137     return res;
1138 }
1139 
// Prepares the playout side: creates the PulseAudio playback stream,
// configures latency/buffer attributes, and allocates the local 10 ms
// transfer buffer. Does not start playout (see StartPlayout()).
// Returns 0 on success (or if already initialized), -1 on failure.
// Requires SetPlayoutDevice() to have been called first.
int32_t AudioDeviceLinuxPulse::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while playout is running.
    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a successful no-op.
    if (_playIsInitialized)
    {
        return 0;
    }

    // Initialize the speaker (devices might have been added or removed)
    // Failure here is only a warning; stream creation below may still work.
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitSpeaker() failed");
    }

    // Set the play sample specification: 16-bit little-endian PCM at the
    // server's native rate (sample_rate_hz_).
    pa_sample_spec playSampleSpec;
    playSampleSpec.channels = _playChannels;
    playSampleSpec.format = PA_SAMPLE_S16LE;
    playSampleSpec.rate = sample_rate_hz_;

    // Create a new play stream
    _playStream = LATE(pa_stream_new)(_paContext, "playStream",
                                      &playSampleSpec, NULL);

    if (!_playStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to create play stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the playStream to the mixer
    _mixerManager.SetPlayStream(_playStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stream state %d\n", LATE(pa_stream_get_state)(_playStream));

    // Set stream flags: let PA keep timing info updated so latency can be
    // interpolated between server round-trips.
    _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
        | PA_STREAM_INTERPOLATE_TIMING);

    if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag doesn't
        // exist in Ubuntu 8.04 and many people still use that, so we have to check
        // the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        // Query the spec actually attached to the stream to compute byte rates.
        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_playStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  pa_stream_get_sample_spec()");
            return -1;
        }

        // Target latency in bytes = bytes/sec * minimum playback latency (ms).
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        // Set the play buffer attributes
        _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
        _playBufferAttr.tlength = latency; // target fill level of play buffer
        // minimum free num bytes before server request more data
        _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
        _playBufferAttr.prebuf = _playBufferAttr.tlength
            - _playBufferAttr.minreq; // prebuffer tlength before starting playout

        _configuredLatencyPlay = latency;
    }

    // num samples in bytes * num channels
    // (10 ms worth of frames: rate/100, times 2 bytes per 16-bit sample)
    _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
    _playbackBufferUnused = _playbackBufferSize;
    // NOTE(review): assumes _playBuffer is NULL here (freed by StopPlayout);
    // otherwise the previous allocation would leak — confirm lifecycle.
    _playBuffer = new int8_t[_playbackBufferSize];

    // Enable underflow callback
    LATE(pa_stream_set_underflow_callback)(_playStream,
                                           PaStreamUnderflowCallback, this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this);

    // Mark playout side as initialized
    _playIsInitialized = true;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    return 0;
}
1258 
// Prepares the recording side: creates the PulseAudio capture stream,
// configures capture latency/buffer attributes, and allocates the local
// 10 ms transfer buffer. Does not start recording (see StartRecording()).
// Returns 0 on success (or if already initialized), -1 on failure.
// Requires SetRecordingDevice() to have been called first.
int32_t AudioDeviceLinuxPulse::InitRecording()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while recording is running.
    if (_recording)
    {
        return -1;
    }

    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a successful no-op.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    // Failure here is only a warning; stream creation below may still work.
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitMicrophone() failed");
    }

    // Set the rec sample specification: 16-bit little-endian PCM at the
    // server's native rate (sample_rate_hz_).
    pa_sample_spec recSampleSpec;
    recSampleSpec.channels = _recChannels;
    recSampleSpec.format = PA_SAMPLE_S16LE;
    recSampleSpec.rate = sample_rate_hz_;

    // Create a new rec stream
    _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
                                     NULL);
    if (!_recStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to create rec stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // Let PA keep timing info updated so latency can be interpolated.
        _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
            | PA_STREAM_INTERPOLATE_TIMING);

        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag doesn't
        // exist in Ubuntu 8.04 and many people still use that, so we have to check
        // the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        // Query the spec actually attached to the stream to compute byte rates.
        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_recStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  pa_stream_get_sample_spec(rec)");
            return -1;
        }

        // Target capture latency in bytes = bytes/sec * low-latency ms target.
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        // Set the rec buffer attributes
        // Note: fragsize specifies a maximum transfer size, not a minimum, so
        // it is not possible to force a high latency setting, only a low one.
        _recBufferAttr.fragsize = latency; // size of fragment
        _recBufferAttr.maxlength = latency + bytesPerSec
            * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        _configuredLatencyRec = latency;
    }

    // 10 ms worth of frames (rate/100) * 2 bytes per sample * channels.
    _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
    _recordBufferUsed = 0;
    // NOTE(review): assumes _recBuffer is NULL here (freed by StopRecording);
    // otherwise the previous allocation would leak — confirm lifecycle.
    _recBuffer = new int8_t[_recordBufferSize];

    // Enable overflow callback
    LATE(pa_stream_set_overflow_callback)(_recStream, PaStreamOverflowCallback,
                                          this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this);

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
1368 
// Requests the dedicated audio thread to start recording and waits (up to
// 10 s) for it to acknowledge. Requires InitRecording() to have succeeded.
// Returns 0 on success (or if already recording), -1 on timeout/failure.
int32_t AudioDeviceLinuxPulse::StartRecording()
{

    if (!_recIsInitialized)
    {
        return -1;
    }

    if (_recording)
    {
        return 0;
    }

    // set state to ensure that the recording starts from the audio thread
    // NOTE(review): _startRec is written here without holding _critSect while
    // the audio thread also reads it — confirm this is intentional/safe.
    _startRec = true;

    // the audio thread will signal when recording has started
    _timeEventRec.Set();
    if (kEventTimeout == _recStartEvent.Wait(10000))
    {
        {
            CriticalSectionScoped lock(&_critSect);
            // Withdraw the request so the audio thread does not start later.
            _startRec = false;
        }
        // Roll back whatever state was set up for recording.
        StopRecording();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to activate recording");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_recording)
        {
            // the recording state is set by the audio thread after recording has started
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to activate recording");
            return -1;
        }
    }

    return 0;
}
1414 
// Stops recording and tears down the capture stream: detaches callbacks,
// disconnects and unrefs the PA stream, and releases the local buffer.
// Returns 0 on success or if recording was never initialized; -1 on error.
int32_t AudioDeviceLinuxPulse::StopRecording()
{

    CriticalSectionScoped lock(&_critSect);

    // Nothing to do if the recording side was never initialized.
    if (!_recIsInitialized)
    {
        return 0;
    }

    // Initialized but no stream is an inconsistent state.
    if (_recStream == NULL)
    {
        return -1;
    }

    _recIsInitialized = false;
    _recording = false;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stopping recording");

    // Stop Recording
    PaLock();

    DisableReadCallback();
    LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to disconnect rec stream, err=%d\n",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  disconnected recording");
    }

    LATE(pa_stream_unref)(_recStream);
    _recStream = NULL;

    PaUnLock();

    // Provide the recStream to the mixer
    // (_recStream is NULL at this point, which detaches the mixer's stream.)
    _mixerManager.SetRecStream(_recStream);

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }

    return 0;
}
1477 
RecordingIsInitialized() const1478 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
1479 {
1480     return (_recIsInitialized);
1481 }
1482 
Recording() const1483 bool AudioDeviceLinuxPulse::Recording() const
1484 {
1485     CriticalSectionScoped lock(&_critSect);
1486     return (_recording);
1487 }
1488 
PlayoutIsInitialized() const1489 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
1490 {
1491     return (_playIsInitialized);
1492 }
1493 
// Requests the dedicated audio thread to start playout and waits (up to
// 10 s) for it to acknowledge. Requires InitPlayout() to have succeeded.
// Returns 0 on success (or if already playing), -1 on timeout/failure.
int32_t AudioDeviceLinuxPulse::StartPlayout()
{
    if (!_playIsInitialized)
    {
        return -1;
    }

    if (_playing)
    {
        return 0;
    }

    // set state to ensure that playout starts from the audio thread
    _startPlay = true;

    // Both |_startPlay| and |_playing| needs protction since they are also
    // accessed on the playout thread.
    // NOTE(review): despite the comment above, _startPlay is written here
    // without holding _critSect — confirm this is intentional/safe.

    // the audio thread will signal when playout has started
    _timeEventPlay.Set();
    if (kEventTimeout == _playStartEvent.Wait(10000))
    {
        {
            CriticalSectionScoped lock(&_critSect);
            // Withdraw the request so the audio thread does not start later.
            _startPlay = false;
        }
        // Roll back whatever state was set up for playout.
        StopPlayout();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to activate playout");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_playing)
        {
            // the playing state is set by the audio thread after playout has started
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to activate playing");
            return -1;
        }
    }

    return 0;
}
1541 
// Stops playout and tears down the playback stream: detaches callbacks,
// disconnects and unrefs the PA stream, and releases the local buffer.
// Returns 0 on success or if playout was never initialized; -1 on error.
int32_t AudioDeviceLinuxPulse::StopPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Nothing to do if the playout side was never initialized.
    if (!_playIsInitialized)
    {
        return 0;
    }

    // Initialized but no stream is an inconsistent state.
    if (_playStream == NULL)
    {
        return -1;
    }

    _playIsInitialized = false;
    _playing = false;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stopping playback");

    // Stop Playout
    PaLock();

    DisableWriteCallback();
    LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to disconnect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  disconnected playback");
    }

    LATE(pa_stream_unref)(_playStream);
    _playStream = NULL;

    PaUnLock();

    // Provide the playStream to the mixer
    // (_playStream is NULL at this point, which detaches the mixer's stream.)
    _mixerManager.SetPlayStream(_playStream);

    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }

    return 0;
}
1606 
PlayoutDelay(uint16_t & delayMS) const1607 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
1608 {
1609     CriticalSectionScoped lock(&_critSect);
1610     delayMS = (uint16_t) _sndCardPlayDelay;
1611     return 0;
1612 }
1613 
RecordingDelay(uint16_t & delayMS) const1614 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
1615 {
1616     CriticalSectionScoped lock(&_critSect);
1617     delayMS = (uint16_t) _sndCardRecDelay;
1618     return 0;
1619 }
1620 
Playing() const1621 bool AudioDeviceLinuxPulse::Playing() const
1622 {
1623     CriticalSectionScoped lock(&_critSect);
1624     return (_playing);
1625 }
1626 
SetPlayoutBuffer(const AudioDeviceModule::BufferType type,uint16_t sizeMS)1627 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1628     const AudioDeviceModule::BufferType type,
1629     uint16_t sizeMS)
1630 {
1631 
1632     if (type != AudioDeviceModule::kFixedBufferSize)
1633     {
1634         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1635                      " Adaptive buffer size not supported on this platform");
1636         return -1;
1637     }
1638 
1639     _playBufType = type;
1640     _playBufDelayFixed = sizeMS;
1641 
1642     return 0;
1643 }
1644 
PlayoutBuffer(AudioDeviceModule::BufferType & type,uint16_t & sizeMS) const1645 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1646     AudioDeviceModule::BufferType& type,
1647     uint16_t& sizeMS) const
1648 {
1649 
1650     type = _playBufType;
1651     sizeMS = _playBufDelayFixed;
1652 
1653     return 0;
1654 }
1655 
CPULoad(uint16_t &) const1656 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
1657 {
1658 
1659     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1660                  "  API call not supported on this platform");
1661     return -1;
1662 }
1663 
PlayoutWarning() const1664 bool AudioDeviceLinuxPulse::PlayoutWarning() const
1665 {
1666   CriticalSectionScoped lock(&_critSect);
1667   return (_playWarning > 0);
1668 }
1669 
PlayoutError() const1670 bool AudioDeviceLinuxPulse::PlayoutError() const
1671 {
1672   CriticalSectionScoped lock(&_critSect);
1673   return (_playError > 0);
1674 }
1675 
RecordingWarning() const1676 bool AudioDeviceLinuxPulse::RecordingWarning() const
1677 {
1678   CriticalSectionScoped lock(&_critSect);
1679   return (_recWarning > 0);
1680 }
1681 
RecordingError() const1682 bool AudioDeviceLinuxPulse::RecordingError() const
1683 {
1684   CriticalSectionScoped lock(&_critSect);
1685   return (_recError > 0);
1686 }
1687 
ClearPlayoutWarning()1688 void AudioDeviceLinuxPulse::ClearPlayoutWarning()
1689 {
1690   CriticalSectionScoped lock(&_critSect);
1691   _playWarning = 0;
1692 }
1693 
ClearPlayoutError()1694 void AudioDeviceLinuxPulse::ClearPlayoutError()
1695 {
1696   CriticalSectionScoped lock(&_critSect);
1697   _playError = 0;
1698 }
1699 
ClearRecordingWarning()1700 void AudioDeviceLinuxPulse::ClearRecordingWarning()
1701 {
1702   CriticalSectionScoped lock(&_critSect);
1703   _recWarning = 0;
1704 }
1705 
ClearRecordingError()1706 void AudioDeviceLinuxPulse::ClearRecordingError()
1707 {
1708   CriticalSectionScoped lock(&_critSect);
1709   _recError = 0;
1710 }
1711 
1712 // ============================================================================
1713 //                                 Private Methods
1714 // ============================================================================
1715 
PaContextStateCallback(pa_context * c,void * pThis)1716 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
1717 {
1718     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaContextStateCallbackHandler(
1719         c);
1720 }
1721 
1722 // ----------------------------------------------------------------------------
1723 //  PaSinkInfoCallback
1724 // ----------------------------------------------------------------------------
1725 
PaSinkInfoCallback(pa_context *,const pa_sink_info * i,int eol,void * pThis)1726 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
1727                                                const pa_sink_info *i, int eol,
1728                                                void *pThis)
1729 {
1730     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
1731         i, eol);
1732 }
1733 
PaSourceInfoCallback(pa_context *,const pa_source_info * i,int eol,void * pThis)1734 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
1735                                                  const pa_source_info *i,
1736                                                  int eol, void *pThis)
1737 {
1738     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
1739         i, eol);
1740 }
1741 
PaServerInfoCallback(pa_context *,const pa_server_info * i,void * pThis)1742 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
1743                                                  const pa_server_info *i,
1744                                                  void *pThis)
1745 {
1746     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaServerInfoCallbackHandler(i);
1747 }
1748 
PaStreamStateCallback(pa_stream * p,void * pThis)1749 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
1750 {
1751     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamStateCallbackHandler(p);
1752 }
1753 
// Runs on the PA mainloop thread whenever the context changes state.
// Traces the state; for the terminal states (FAILED/TERMINATED/READY) it
// also sets _paStateChanged and wakes any thread blocked in
// pa_threaded_mainloop_wait() (see InitPulseAudio()).
void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  context state cb");

    pa_context_state_t state = LATE(pa_context_get_state)(c);
    switch (state)
    {
        case PA_CONTEXT_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  unconnected");
            break;
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            // Intermediate states: nothing to signal yet.
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  no state");
            break;
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  failed");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
        case PA_CONTEXT_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  ready");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
    }
}
1787 
// Runs on the PA mainloop thread once per sink during enumeration (and a
// final time with eol set). When the running count matches _deviceIndex it
// records the sink's PA index and copies its name/description into the
// caller-provided buffers (if any).
void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
                                                      int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    if (_numPlayDevices == _deviceIndex)
    {
        // Convert the device index to the one of the sink
        _paDeviceIndex = i->index;

        if (_playDeviceName)
        {
            // Copy the sink name (truncate and force zero termination)
            strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
            _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
        if (_playDisplayDeviceName)
        {
            // Copy the sink display name
            strncpy(_playDisplayDeviceName, i->description,
                    kAdmMaxDeviceNameSize);
            _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
    }

    _numPlayDevices++;
}
1820 
// Runs on the PA mainloop thread once per source during enumeration (and a
// final time with eol set). Monitor sources (mirrors of sinks) are skipped.
// When the running count matches _deviceIndex it records the source's PA
// index and copies its name/description into the caller-provided buffers.
void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
    const pa_source_info *i,
    int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    // We don't want to list output devices
     if (i->monitor_of_sink == PA_INVALID_INDEX)
    {
        if (_numRecDevices == _deviceIndex)
        {
            // Convert the device index to the one of the source
            _paDeviceIndex = i->index;

            if (_recDeviceName)
            {
                // copy the source name (truncate and force zero termination)
                strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
                _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
            if (_recDisplayDeviceName)
            {
                // Copy the source display name
                strncpy(_recDisplayDeviceName, i->description,
                        kAdmMaxDeviceNameSize);
                _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
        }

        _numRecDevices++;
    }
}
1858 
// Runs on the PA mainloop thread with the server's global information.
// Captures the native sample rate and server version, and — when the
// display-name buffers are set (see GetDefaultDeviceInfo()) — the default
// source/sink names. Wakes the waiting thread when done.
void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i)
{
    // Use PA native sampling rate
    sample_rate_hz_ = i->sample_spec.rate;

    // Copy the PA server version
    // NOTE(review): the 31/32 magic numbers assume _paServerVersion holds at
    // least 32 bytes — confirm against the member declaration.
    strncpy(_paServerVersion, i->server_version, 31);
    _paServerVersion[31] = '\0';

    if (_recDisplayDeviceName)
    {
        // Copy the source name (truncate and force zero termination)
        strncpy(_recDisplayDeviceName, i->default_source_name,
                kAdmMaxDeviceNameSize);
        _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    if (_playDisplayDeviceName)
    {
        // Copy the sink name (truncate and force zero termination)
        strncpy(_playDisplayDeviceName, i->default_sink_name,
                kAdmMaxDeviceNameSize);
        _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    // Wake the thread blocked in pa_threaded_mainloop_wait().
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1886 
// Runs on the PA mainloop thread whenever a play/rec stream changes state.
// Traces the new state and unconditionally wakes any thread blocked in
// pa_threaded_mainloop_wait().
void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stream state cb");

    pa_stream_state_t state = LATE(pa_stream_get_state)(p);
    switch (state)
    {
        case PA_STREAM_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  unconnected");
            break;
        case PA_STREAM_CREATING:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  creating");
            break;
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  failed");
            break;
        case PA_STREAM_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  ready");
            break;
    }

    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1916 
CheckPulseAudioVersion()1917 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
1918 {
1919     PaLock();
1920 
1921     pa_operation* paOperation = NULL;
1922 
1923     // get the server info and update deviceName
1924     paOperation = LATE(pa_context_get_server_info)(_paContext,
1925                                                    PaServerInfoCallback, this);
1926 
1927     WaitForOperationCompletion(paOperation);
1928 
1929     PaUnLock();
1930 
1931     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
1932                  "  checking PulseAudio version: %s", _paServerVersion);
1933 
1934     return 0;
1935 }
1936 
InitSamplingFrequency()1937 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
1938 {
1939     PaLock();
1940 
1941     pa_operation* paOperation = NULL;
1942 
1943     // Get the server info and update sample_rate_hz_
1944     paOperation = LATE(pa_context_get_server_info)(_paContext,
1945                                                    PaServerInfoCallback, this);
1946 
1947     WaitForOperationCompletion(paOperation);
1948 
1949     PaUnLock();
1950 
1951     return 0;
1952 }
1953 
GetDefaultDeviceInfo(bool recDevice,char * name,uint16_t & index)1954 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
1955                                                     char* name,
1956                                                     uint16_t& index)
1957 {
1958     char tmpName[kAdmMaxDeviceNameSize] = {0};
1959     // subtract length of "default: "
1960     uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
1961     char* pName = NULL;
1962 
1963     if (name)
1964     {
1965         // Add "default: "
1966         strcpy(name, "default: ");
1967         pName = &name[9];
1968     }
1969 
1970     // Tell the callback that we want
1971     // the name for this device
1972     if (recDevice)
1973     {
1974         _recDisplayDeviceName = tmpName;
1975     } else
1976     {
1977         _playDisplayDeviceName = tmpName;
1978     }
1979 
1980     // Set members
1981     _paDeviceIndex = -1;
1982     _deviceIndex = 0;
1983     _numPlayDevices = 0;
1984     _numRecDevices = 0;
1985 
1986     PaLock();
1987 
1988     pa_operation* paOperation = NULL;
1989 
1990     // Get the server info and update deviceName
1991     paOperation = LATE(pa_context_get_server_info)(_paContext,
1992                                                    PaServerInfoCallback, this);
1993 
1994     WaitForOperationCompletion(paOperation);
1995 
1996     // Get the device index
1997     if (recDevice)
1998     {
1999         paOperation
2000             = LATE(pa_context_get_source_info_by_name)(_paContext,
2001                                                        (char *) tmpName,
2002                                                        PaSourceInfoCallback,
2003                                                        this);
2004     } else
2005     {
2006         paOperation
2007             = LATE(pa_context_get_sink_info_by_name)(_paContext,
2008                                                      (char *) tmpName,
2009                                                      PaSinkInfoCallback, this);
2010     }
2011 
2012     WaitForOperationCompletion(paOperation);
2013 
2014     PaUnLock();
2015 
2016     // Set the index
2017     index = _paDeviceIndex;
2018 
2019     if (name)
2020     {
2021         // Copy to name string
2022         strncpy(pName, tmpName, nameLen);
2023     }
2024 
2025     // Clear members
2026     _playDisplayDeviceName = NULL;
2027     _recDisplayDeviceName = NULL;
2028     _paDeviceIndex = -1;
2029     _deviceIndex = -1;
2030     _numPlayDevices = 0;
2031     _numRecDevices = 0;
2032 
2033     return 0;
2034 }
2035 
// Loads libpulse via the late-binding symbol table, creates and starts a
// threaded mainloop, creates a context and connects it to the default
// PulseAudio server (no autospawn), then hands the mainloop/context pair to
// the mixer manager and initializes the sampling frequency.
// Returns 0 on success, -1 on any failure.
// NOTE(review): failure paths after the mainloop/context are created leave
// them allocated — presumably the caller invokes TerminatePulseAudio() (which
// null-checks and releases both) on failure; confirm against callers.
int32_t AudioDeviceLinuxPulse::InitPulseAudio()
{
    int retVal = 0;

    // Load libpulse
    if (!PaSymbolTable.Load())
    {
        // Most likely the Pulse library and sound server are not installed on
        // this system
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to load symbol table");
        return -1;
    }

    // Create a mainloop API and connection to the default server
    // the mainloop is the internal asynchronous API event loop
    if (_paMainloop) {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PA mainloop has already existed");
        return -1;
    }
    _paMainloop = LATE(pa_threaded_mainloop_new)();
    if (!_paMainloop)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create mainloop");
        return -1;
    }

    // Start the threaded main loop
    retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to start main loop, error=%d", retVal);
        return -1;
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  mainloop running!");

    // All pa_* calls below run under the mainloop lock.
    PaLock();

    _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
    if (!_paMainloopApi)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create mainloop API");
        PaUnLock();
        return -1;
    }

    // Create a new PulseAudio context
    if (_paContext){
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PA context has already existed");
        PaUnLock();
        return -1;
    }
    _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

    if (!_paContext)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create context");
        PaUnLock();
        return -1;
    }

    // Set state callback function
    LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
                                        this);

    // Connect the context to a server (default)
    _paStateChanged = false;
    retVal = LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN,
                                      NULL);

    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to connect context, error=%d", retVal);
        PaUnLock();
        return -1;
    }

    // Wait for state change
    // (_paStateChanged is presumably set by PaContextStateCallback, which
    // also signals the mainloop to wake this wait — confirm in the callback.)
    while (!_paStateChanged)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    // Now check to see what final state we reached.
    pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

    if (state != PA_CONTEXT_READY)
    {
        if (state == PA_CONTEXT_FAILED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect to PulseAudio sound server");
        } else if (state == PA_CONTEXT_TERMINATED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  PulseAudio connection terminated early");
        } else
        {
            // Shouldn't happen, because we only signal on one of those three
            // states
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  unknown problem connecting to PulseAudio");
        }
        PaUnLock();
        return -1;
    }

    PaUnLock();

    // Give the objects to the mixer manager
    _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

    // Check the version
    if (CheckPulseAudioVersion() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PulseAudio version %s not supported", _paServerVersion);
        return -1;
    }

    // Initialize sampling frequency
    if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to initialize sampling frequency, set to %d Hz",
                     sample_rate_hz_);
        return -1;
    }

    return 0;
}
2176 
TerminatePulseAudio()2177 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2178 {
2179     // Do nothing if the instance doesn't exist
2180     // likely PaSymbolTable.Load() fails
2181     if (!_paMainloop) {
2182         return 0;
2183     }
2184 
2185     PaLock();
2186 
2187     // Disconnect the context
2188     if (_paContext)
2189     {
2190         LATE(pa_context_disconnect)(_paContext);
2191     }
2192 
2193     // Unreference the context
2194     if (_paContext)
2195     {
2196         LATE(pa_context_unref)(_paContext);
2197     }
2198 
2199     PaUnLock();
2200     _paContext = NULL;
2201 
2202     // Stop the threaded main loop
2203     if (_paMainloop)
2204     {
2205         LATE(pa_threaded_mainloop_stop)(_paMainloop);
2206     }
2207 
2208     // Free the mainloop
2209     if (_paMainloop)
2210     {
2211         LATE(pa_threaded_mainloop_free)(_paMainloop);
2212     }
2213 
2214     _paMainloop = NULL;
2215 
2216     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2217                  "  PulseAudio terminated");
2218 
2219     return 0;
2220 }
2221 
// Acquires the PulseAudio threaded-mainloop lock (wraps
// pa_threaded_mainloop_lock); paired with PaUnLock().
void AudioDeviceLinuxPulse::PaLock()
{
    LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
2226 
// Releases the PulseAudio threaded-mainloop lock (wraps
// pa_threaded_mainloop_unlock); counterpart of PaLock().
void AudioDeviceLinuxPulse::PaUnLock()
{
    LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
2231 
WaitForOperationCompletion(pa_operation * paOperation) const2232 void AudioDeviceLinuxPulse::WaitForOperationCompletion(
2233     pa_operation* paOperation) const
2234 {
2235     if (!paOperation)
2236     {
2237         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2238                      "paOperation NULL in WaitForOperationCompletion");
2239         return;
2240     }
2241 
2242     while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
2243     {
2244         LATE(pa_threaded_mainloop_wait)(_paMainloop);
2245     }
2246 
2247     LATE(pa_operation_unref)(paOperation);
2248 }
2249 
2250 // ============================================================================
2251 //                                  Thread Methods
2252 // ============================================================================
2253 
EnableWriteCallback()2254 void AudioDeviceLinuxPulse::EnableWriteCallback()
2255 {
2256     if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
2257     {
2258         // May already have available space. Must check.
2259         _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
2260         if (_tempBufferSpace > 0)
2261         {
2262             // Yup, there is already space available, so if we register a write
2263             // callback then it will not receive any event. So dispatch one ourself
2264             // instead
2265             _timeEventPlay.Set();
2266             return;
2267         }
2268     }
2269 
2270     LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
2271                                        this);
2272 }
2273 
// Unregisters the playout write callback so PulseAudio stops notifying us
// while the play thread is consuming the pending write request.
void AudioDeviceLinuxPulse::DisableWriteCallback()
{
    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
}
2278 
PaStreamWriteCallback(pa_stream *,size_t buffer_space,void * pThis)2279 void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
2280                                                   size_t buffer_space,
2281                                                   void *pThis)
2282 {
2283     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
2284         buffer_space);
2285 }
2286 
// Records how much space the playout stream can accept and wakes the play
// thread to fill it.
void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
{
    _tempBufferSpace = bufferSpace;

    // Since we write the data asynchronously on a different thread, we have
    // to temporarily disable the write callback or else Pulse will call it
    // continuously until we write the data. We re-enable it below.
    DisableWriteCallback();
    _timeEventPlay.Set();
}
2297 
PaStreamUnderflowCallback(pa_stream *,void * pThis)2298 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
2299                                                       void *pThis)
2300 {
2301     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamUnderflowCallbackHandler();
2302 }
2303 
// Handles a playout underflow: if we configured an explicit latency, grow
// the target latency by WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS worth of
// bytes and push the new buffer attributes to the stream (fire-and-forget).
void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Playout underflow");

    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // We didn't configure a pa_buffer_attr before, so switching to one now
        // would be questionable.
        return;
    }

    // Otherwise reconfigure the stream with a higher target latency.

    // Need the sample spec to convert milliseconds into bytes.
    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
    if (!spec)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_get_sample_spec()");
        return;
    }

    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
    // New latency (in bytes) = old latency + increment-ms worth of bytes.
    uint32_t newLatency = _configuredLatencyPlay + bytesPerSec
        * WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / WEBRTC_PA_MSECS_PER_SEC;

    // Set the play buffer attributes
    _playBufferAttr.maxlength = newLatency;
    _playBufferAttr.tlength = newLatency;
    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;

    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
                                                       &_playBufferAttr, NULL,
                                                       NULL);
    if (!op)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_set_buffer_attr()");
        return;
    }

    // Don't need to wait for this to complete.
    LATE(pa_operation_unref)(op);

    // Save the new latency in case we underflow again.
    _configuredLatencyPlay = newLatency;
}
2352 
// Registers PaStreamReadCallback on the record stream so we are notified
// when captured data is available to peek.
void AudioDeviceLinuxPulse::EnableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
}
2357 
// Unregisters the record read callback while the rec thread consumes the
// peeked data.
void AudioDeviceLinuxPulse::DisableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
}
2362 
PaStreamReadCallback(pa_stream *,size_t,void * pThis)2363 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
2364                                                  size_t /*unused2*/,
2365                                                  void *pThis)
2366 {
2367     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamReadCallbackHandler();
2368 }
2369 
// Peeks the available capture data (pointer + size cached in members for the
// rec thread), drops PA "holes", then disables read notifications and wakes
// the rec thread to consume the data.
void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
{
    // We get the data pointer and size now in order to save one Lock/Unlock
    // in the worker thread
    if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, &_tempSampleDataSize)
        != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  Can't read data!");
        return;
    }

    // PulseAudio record streams can have holes (for reasons not entirely clear
    // to the PA developers themselves). Since version 4 of PA, these are passed
    // over to the application (us), signaled by a non-zero sample data size
    // (the size of the hole) and a NULL sample data.
    // We handle stream holes as recommended by PulseAudio, i.e. by skipping
    // it, which is done with a stream drop.
    if (_tempSampleDataSize && !_tempSampleData) {
        LATE(pa_stream_drop)(_recStream);
        _tempSampleDataSize = 0; // reset
        return;
    }

    // Since we consume the data asynchronously on a different thread, we have
    // to temporarily disable the read callback or else Pulse will call it
    // continuously until we consume the data. We re-enable it below
    DisableReadCallback();
    _timeEventRec.Set();
}
2400 
PaStreamOverflowCallback(pa_stream *,void * pThis)2401 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
2402                                                      void *pThis)
2403 {
2404     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamOverflowCallbackHandler();
2405 }
2406 
// Recording overflow handler: only logs a warning; the overflowed data is
// not recovered here.
void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Recording overflow");
}
2412 
LatencyUsecs(pa_stream * stream)2413 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
2414 {
2415     if (!WEBRTC_PA_REPORT_LATENCY)
2416     {
2417         return 0;
2418     }
2419 
2420     if (!stream)
2421     {
2422         return 0;
2423     }
2424 
2425     pa_usec_t latency;
2426     int negative;
2427     if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
2428     {
2429         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2430                      "  Can't query latency");
2431         // We'd rather continue playout/capture with an incorrect delay than stop
2432         // it altogether, so return a valid value.
2433         return 0;
2434     }
2435 
2436     if (negative)
2437     {
2438         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2439                      "  warning: pa_stream_get_latency reported negative delay");
2440 
2441         // The delay can be negative for monitoring streams if the captured
2442         // samples haven't been played yet. In such a case, "latency" contains the
2443         // magnitude, so we must negate it to get the real value.
2444         int32_t tmpLatency = (int32_t) -latency;
2445         if (tmpLatency < 0)
2446         {
2447             // Make sure that we don't use a negative delay
2448             tmpLatency = 0;
2449         }
2450 
2451         return tmpLatency;
2452     } else
2453     {
2454         return (int32_t) latency;
2455     }
2456 }
2457 
// Consumes one peeked chunk of captured audio: first tops up any partially
// filled frame in _recBuffer, then forwards whole frames of _recordBufferSize
// bytes directly from the input, and finally stashes any leftover bytes in
// _recBuffer for the next call. Also updates the capture (and, if playing,
// playout) delay estimates. Returns 0 on success, -1 when recording stopped
// during a ProcessRecordedData() callback.
int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
                                                size_t bufferSize)
{
    size_t size = bufferSize;
    // Samples per frame; divisor 2*_recChannels assumes 16-bit samples —
    // TODO(review): confirm sample format elsewhere in the file.
    uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);

    // Account for the peeked data and the used data
    // (stream latency in ms plus 10 ms per buffered-but-unprocessed frame).
    uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));

    _sndCardRecDelay = recDelay;

    if (_playStream)
    {
        // Get the playout delay
        _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
    }

    if (_recordBufferUsed > 0)
    {
        // Have to copy to the buffer until it is full
        size_t copy = _recordBufferSize - _recordBufferUsed;
        if (size < copy)
        {
            copy = size;
        }

        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
        _recordBufferUsed += copy;
        // Advance past the bytes we just consumed.
        bufferData = static_cast<const char *> (bufferData) + copy;
        size -= copy;

        if (_recordBufferUsed != _recordBufferSize)
        {
            // Not enough data yet to pass to VoE
            return 0;
        }

        // Provide data to VoiceEngine
        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
        {
            // We have stopped recording
            return -1;
        }

        _recordBufferUsed = 0;
    }

    // Now process full 10ms sample sets directly from the input
    while (size >= _recordBufferSize)
    {
        // Provide data to VoiceEngine
        if (ProcessRecordedData(
            static_cast<int8_t *> (const_cast<void *> (bufferData)),
            numRecSamples, recDelay) == -1)
        {
            // We have stopped recording
            return -1;
        }

        bufferData = static_cast<const char *> (bufferData) + _recordBufferSize;
        size -= _recordBufferSize;

        // We have consumed 10ms of data
        recDelay -= 10;
    }

    // Now save any leftovers for later.
    if (size > 0)
    {
        memcpy(_recBuffer, bufferData, size);
        _recordBufferUsed = size;
    }

    return 0;
}
2534 
// Delivers one frame of recorded samples to the AudioDeviceBuffer observer,
// handling AGC mic-level bookkeeping around the callback. Caller must hold
// _critSect (see EXCLUSIVE_LOCKS_REQUIRED); the lock is temporarily released
// around DeliverRecordedData(). Returns 0 normally, -1 if recording was
// stopped while the lock was released.
int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay is
    // being correctly calculated here, but for legacy reasons we add +10 ms to
    // the value in the AEC. The real fix will be part of a larger investigation
    // into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
    // Deliver recorded samples at specified sample rate,
    // mic level etc. to the observer using callback
    // (unlocked so the observer callback cannot deadlock against us).
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         "  AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id,
                             "  the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}
2603 
PlayThreadFunc(void * pThis)2604 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
2605 {
2606     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
2607 }
2608 
RecThreadFunc(void * pThis)2609 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
2610 {
2611     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
2612 }
2613 
// Playout worker: waits (up to 1 s) on _timeEventPlay, then either performs
// the one-time stream connection (when _startPlay is set) or writes PCM to
// the PulseAudio playback stream — first flushing the remainder of the
// previous frame, then requesting and writing a fresh frame from the
// AudioDeviceBuffer. Always returns true so the thread keeps running.
// Lock ordering: _critSect (Lock()) is taken before the PA mainloop lock
// (PaLock()) and released around the RequestPlayoutData() callback.
bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    Lock();

    if (_startPlay)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
          _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_) {
          pa_cvolume cVolumes;
          ptr_cvolume = &cVolumes;

          // Set the same volume for all channels
          const pa_sample_spec *spec =
              LATE(pa_stream_get_sample_spec)(_playStream);
          LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
          update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        // (a failed connect is only logged here; the READY wait below then
        // relies on the stream eventually signaling a state change)
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        // Signal StartPlayout() (or whoever waits on this) that the stream
        // is up.
        _playStartEvent.Set();

        UnLock();
        return true;
    }

    if (_playing)
    {
        if (!_recording)
        {
            // Update the playout delay
            // (when recording, ReadRecordedData() maintains this instead)
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        if (_playbackBufferUnused < _playbackBufferSize)
        {

            // Flush what remains of the previously fetched frame, bounded by
            // the writable space Pulse reported.
            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                                      _playStream,
                                      (void *) &_playBuffer[_playbackBufferUnused],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                    _playError = 1; // Triggers callback from module process thread
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }

        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        if (_tempBufferSpace > 0) // Might have been reduced to zero by the above
        {
            // Ask for new PCM data to be played out using the AudioDeviceBuffer
            // ensure that this callback is executed without taking the
            // audio-thread lock
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again
            if (!_playing)
            {
                UnLock();
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, "  invalid number of output samples(%d)",
                             nSamples);
            }

            // Write as much of the fresh frame as Pulse can take right now.
            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                    _playError = 1; // triggers callback from module process thread
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            // Bytes written so far of the current frame; remainder is flushed
            // on the next pass.
            _playbackBufferUnused = write;
        }

        _tempBufferSpace = 0;
        // Re-arm write notifications for the next round.
        PaLock();
        EnableWriteCallback();
        PaUnLock();

    }  // _playing

    UnLock();
    return true;
}
2854 
RecThreadProcess()2855 bool AudioDeviceLinuxPulse::RecThreadProcess()
2856 {
2857     switch (_timeEventRec.Wait(1000))
2858     {
2859         case kEventSignaled:
2860             break;
2861         case kEventError:
2862             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2863                          "EventWrapper::Wait() failed");
2864             return true;
2865         case kEventTimeout:
2866             return true;
2867     }
2868 
2869     Lock();
2870 
2871     if (_startRec)
2872     {
2873         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2874                      "_startRec true, performing initial actions");
2875 
2876         _recDeviceName = NULL;
2877 
2878         // Set if not default device
2879         if (_inputDeviceIndex > 0)
2880         {
2881             // Get the recording device name
2882             _recDeviceName = new char[kAdmMaxDeviceNameSize];
2883             _deviceIndex = _inputDeviceIndex;
2884             RecordingDevices();
2885         }
2886 
2887         PaLock();
2888 
2889         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2890                      "  connecting stream");
2891 
2892         // Connect the stream to a source
2893         if (LATE(pa_stream_connect_record)(_recStream, _recDeviceName,
2894                                            &_recBufferAttr,
2895                                            (pa_stream_flags_t) _recStreamFlags)
2896             != PA_OK)
2897         {
2898             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2899                          "  failed to connect rec stream, err=%d",
2900                          LATE(pa_context_errno)(_paContext));
2901         }
2902 
2903         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2904                      "  connected");
2905 
2906         // Wait for state change
2907         while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
2908         {
2909             LATE(pa_threaded_mainloop_wait)(_paMainloop);
2910         }
2911 
2912         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2913                      "  done");
2914 
2915         // We can now handle read callbacks
2916         EnableReadCallback();
2917 
2918         PaUnLock();
2919 
2920         // Clear device name
2921         if (_recDeviceName)
2922         {
2923             delete [] _recDeviceName;
2924             _recDeviceName = NULL;
2925         }
2926 
2927         _startRec = false;
2928         _recording = true;
2929         _recStartEvent.Set();
2930 
2931         UnLock();
2932         return true;
2933     }
2934 
2935     if (_recording)
2936     {
2937         // Read data and provide it to VoiceEngine
2938         if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
2939         {
2940             UnLock();
2941             return true;
2942         }
2943 
2944         _tempSampleData = NULL;
2945         _tempSampleDataSize = 0;
2946 
2947         PaLock();
2948         while (true)
2949         {
2950             // Ack the last thing we read
2951             if (LATE(pa_stream_drop)(_recStream) != 0)
2952             {
2953                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2954                              _id, "  failed to drop, err=%d\n",
2955                              LATE(pa_context_errno)(_paContext));
2956             }
2957 
2958             if (LATE(pa_stream_readable_size)(_recStream) <= 0)
2959             {
2960                 // Then that was all the data
2961                 break;
2962             }
2963 
2964             // Else more data.
2965             const void *sampleData;
2966             size_t sampleDataSize;
2967 
2968             if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
2969                 != 0)
2970             {
2971                 _recError = 1; // triggers callback from module process thread
2972                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
2973                              _id, "  RECORD_ERROR message posted, error = %d",
2974                              LATE(pa_context_errno)(_paContext));
2975                 break;
2976             }
2977 
2978             _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
2979                 / 1000);
2980 
2981             // Drop lock for sigslot dispatch, which could take a while.
2982             PaUnLock();
2983             // Read data and provide it to VoiceEngine
2984             if (ReadRecordedData(sampleData, sampleDataSize) == -1)
2985             {
2986                 UnLock();
2987                 return true;
2988             }
2989             PaLock();
2990 
2991             // Return to top of loop for the ack and the check for more data.
2992         }
2993 
2994         EnableReadCallback();
2995         PaUnLock();
2996 
2997     }  // _recording
2998 
2999     UnLock();
3000     return true;
3001 }
3002 
KeyPressed() const3003 bool AudioDeviceLinuxPulse::KeyPressed() const{
3004 #ifdef USE_X11
3005   char szKey[32];
3006   unsigned int i = 0;
3007   char state = 0;
3008 
3009   if (!_XDisplay)
3010     return false;
3011 
3012   // Check key map status
3013   XQueryKeymap(_XDisplay, szKey);
3014 
3015   // A bit change in keymap means a key is pressed
3016   for (i = 0; i < sizeof(szKey); i++)
3017     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
3018 
3019   // Save old state
3020   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
3021   return (state != 0);
3022 #else
3023   return false;
3024 #endif
3025 }
3026 }
3027