1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12
13 #include "webrtc/base/checks.h"
14 #include "webrtc/base/logging.h"
15 #include "webrtc/modules/audio_device/audio_device_config.h"
16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
17 #include "webrtc/system_wrappers/include/event_wrapper.h"
18 #include "webrtc/system_wrappers/include/trace.h"
19
// Process-wide symbol table used to late-bind all libpulse entry points.
webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;

// Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our binary
// will work on systems that don't have it.
#define LATE(sym) \
LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
27
28 namespace webrtc
29 {
30
// Constructs the PulseAudio device module. Only performs cheap member
// initialization; the actual connection to the PulseAudio daemon is deferred
// to Init(). The initializer list order must match member declaration order.
AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
    _ptrAudioBuffer(NULL),
    // Heap-allocated sync primitives; released via `delete &...` in the dtor.
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _timeEventRec(*EventWrapper::Create()),
    _timeEventPlay(*EventWrapper::Create()),
    _recStartEvent(*EventWrapper::Create()),
    _playStartEvent(*EventWrapper::Create()),
    _id(id),
    _mixerManager(id),
    // Index 0 means "default device" for both directions.
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    sample_rate_hz_(0),
    _recChannels(1),
    _playChannels(1),
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _startRec(false),
    _stopRec(false),
    _startPlay(false),
    _stopPlay(false),
    _AGC(false),
    update_speaker_volume_at_startup_(false),
    _playBufDelayFixed(20),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writeErrors(0),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    // -1 means "no device lookup in progress" for the PA enumeration callbacks.
    _deviceIndex(-1),
    _numPlayDevices(0),
    _numRecDevices(0),
    _playDeviceName(NULL),
    _recDeviceName(NULL),
    _playDisplayDeviceName(NULL),
    _recDisplayDeviceName(NULL),
    _playBuffer(NULL),
    _playbackBufferSize(0),
    _playbackBufferUnused(0),
    _tempBufferSpace(0),
    _recBuffer(NULL),
    _recordBufferSize(0),
    _recordBufferUsed(0),
    _tempSampleData(NULL),
    _tempSampleDataSize(0),
    _configuredLatencyPlay(0),
    _configuredLatencyRec(0),
    _paDeviceIndex(-1),
    _paStateChanged(false),
    _paMainloop(NULL),
    _paMainloopApi(NULL),
    _paContext(NULL),
    _recStream(NULL),
    _playStream(NULL),
    _recStreamFlags(0),
    _playStreamFlags(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    // Zero the POD buffers/attributes that have no in-class initializers.
    memset(_paServerVersion, 0, sizeof(_paServerVersion));
    memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
    memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
    memset(_oldKeyState, 0, sizeof(_oldKeyState));
}
103
~AudioDeviceLinuxPulse()104 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
105 {
106 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
107 "%s destroyed", __FUNCTION__);
108 RTC_DCHECK(thread_checker_.CalledOnValidThread());
109 Terminate();
110
111 if (_recBuffer)
112 {
113 delete [] _recBuffer;
114 _recBuffer = NULL;
115 }
116 if (_playBuffer)
117 {
118 delete [] _playBuffer;
119 _playBuffer = NULL;
120 }
121 if (_playDeviceName)
122 {
123 delete [] _playDeviceName;
124 _playDeviceName = NULL;
125 }
126 if (_recDeviceName)
127 {
128 delete [] _recDeviceName;
129 _recDeviceName = NULL;
130 }
131
132 delete &_recStartEvent;
133 delete &_playStartEvent;
134 delete &_timeEventRec;
135 delete &_timeEventPlay;
136 delete &_critSect;
137 }
138
AttachAudioBuffer(AudioDeviceBuffer * audioBuffer)139 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
140 {
141 RTC_DCHECK(thread_checker_.CalledOnValidThread());
142
143 _ptrAudioBuffer = audioBuffer;
144
145 // Inform the AudioBuffer about default settings for this implementation.
146 // Set all values to zero here since the actual settings will be done by
147 // InitPlayout and InitRecording later.
148 _ptrAudioBuffer->SetRecordingSampleRate(0);
149 _ptrAudioBuffer->SetPlayoutSampleRate(0);
150 _ptrAudioBuffer->SetRecordingChannels(0);
151 _ptrAudioBuffer->SetPlayoutChannels(0);
152 }
153
154 // ----------------------------------------------------------------------------
155 // ActiveAudioLayer
156 // ----------------------------------------------------------------------------
157
ActiveAudioLayer(AudioDeviceModule::AudioLayer & audioLayer) const158 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
159 AudioDeviceModule::AudioLayer& audioLayer) const
160 {
161 audioLayer = AudioDeviceModule::kLinuxPulseAudio;
162 return 0;
163 }
164
// One-time initialization: connects to the PulseAudio daemon, opens the X11
// display (for typing detection) and starts the realtime record/playout
// worker threads. Idempotent — returns OK immediately if already initialized.
AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (_initialized) {
    return InitStatus::OK;
  }

  // Initialize PulseAudio
  if (InitPulseAudio() < 0) {
    LOG(LS_ERROR) << "failed to initialize PulseAudio";
    // Best-effort cleanup of any partially created PA state.
    if (TerminatePulseAudio() < 0) {
      LOG(LS_ERROR) << "failed to terminate PulseAudio";
    }
    return InitStatus::OTHER_ERROR;
  }

  // Reset sticky warning/error flags from any previous session.
  _playWarning = 0;
  _playError = 0;
  _recWarning = 0;
  _recError = 0;

#ifdef USE_X11
  // Get X display handle for typing detection
  _XDisplay = XOpenDisplay(NULL);
  if (!_XDisplay) {
    // Non-fatal: everything but typing detection still works.
    LOG(LS_WARNING)
        << "failed to open X display, typing detection will not work";
  }
#endif

  // RECORDING: spawn the capture thread at realtime priority.
  _ptrThreadRec.reset(new rtc::PlatformThread(
      RecThreadFunc, this, "webrtc_audio_module_rec_thread"));

  _ptrThreadRec->Start();
  _ptrThreadRec->SetPriority(rtc::kRealtimePriority);

  // PLAYOUT: spawn the render thread at realtime priority.
  _ptrThreadPlay.reset(new rtc::PlatformThread(
      PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
  _ptrThreadPlay->Start();
  _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);

  _initialized = true;

  return InitStatus::OK;
}
211
// Reverses Init(): closes the mixer, stops and deletes both worker threads
// (signalling their timer events first so they wake up and can exit), shuts
// down PulseAudio and closes the X display. Safe to call when not initialized.
int32_t AudioDeviceLinuxPulse::Terminate()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    if (!_initialized)
    {
        return 0;
    }

    _mixerManager.Close();

    // RECORDING
    if (_ptrThreadRec)
    {
        // Release ownership before Stop() so the member is already cleared
        // while the thread winds down.
        rtc::PlatformThread* tmpThread = _ptrThreadRec.release();

        // Wake the thread from its timed wait so Stop() can join promptly.
        _timeEventRec.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // PLAYOUT
    if (_ptrThreadPlay)
    {
        rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();

        _timeEventPlay.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // Terminate PulseAudio
    if (TerminatePulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to terminate PulseAudio");
        return -1;
    }

#ifdef USE_X11
    if (_XDisplay)
    {
      XCloseDisplay(_XDisplay);
      _XDisplay = NULL;
    }
#endif

    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return 0;
}
264
Initialized() const265 bool AudioDeviceLinuxPulse::Initialized() const
266 {
267 RTC_DCHECK(thread_checker_.CalledOnValidThread());
268 return (_initialized);
269 }
270
InitSpeaker()271 int32_t AudioDeviceLinuxPulse::InitSpeaker()
272 {
273 RTC_DCHECK(thread_checker_.CalledOnValidThread());
274
275 if (_playing)
276 {
277 return -1;
278 }
279
280 if (!_outputDeviceIsSpecified)
281 {
282 return -1;
283 }
284
285 // check if default device
286 if (_outputDeviceIndex == 0)
287 {
288 uint16_t deviceIndex = 0;
289 GetDefaultDeviceInfo(false, NULL, deviceIndex);
290 _paDeviceIndex = deviceIndex;
291 } else
292 {
293 // get the PA device index from
294 // the callback
295 _deviceIndex = _outputDeviceIndex;
296
297 // get playout devices
298 PlayoutDevices();
299 }
300
301 // the callback has now set the _paDeviceIndex to
302 // the PulseAudio index of the device
303 if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
304 {
305 return -1;
306 }
307
308 // clear _deviceIndex
309 _deviceIndex = -1;
310 _paDeviceIndex = -1;
311
312 return 0;
313 }
314
InitMicrophone()315 int32_t AudioDeviceLinuxPulse::InitMicrophone()
316 {
317 RTC_DCHECK(thread_checker_.CalledOnValidThread());
318 if (_recording)
319 {
320 return -1;
321 }
322
323 if (!_inputDeviceIsSpecified)
324 {
325 return -1;
326 }
327
328 // Check if default device
329 if (_inputDeviceIndex == 0)
330 {
331 uint16_t deviceIndex = 0;
332 GetDefaultDeviceInfo(true, NULL, deviceIndex);
333 _paDeviceIndex = deviceIndex;
334 } else
335 {
336 // Get the PA device index from
337 // the callback
338 _deviceIndex = _inputDeviceIndex;
339
340 // get recording devices
341 RecordingDevices();
342 }
343
344 // The callback has now set the _paDeviceIndex to
345 // the PulseAudio index of the device
346 if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
347 {
348 return -1;
349 }
350
351 // Clear _deviceIndex
352 _deviceIndex = -1;
353 _paDeviceIndex = -1;
354
355 return 0;
356 }
357
SpeakerIsInitialized() const358 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
359 {
360 RTC_DCHECK(thread_checker_.CalledOnValidThread());
361 return (_mixerManager.SpeakerIsInitialized());
362 }
363
MicrophoneIsInitialized() const364 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
365 {
366 RTC_DCHECK(thread_checker_.CalledOnValidThread());
367 return (_mixerManager.MicrophoneIsInitialized());
368 }
369
SpeakerVolumeIsAvailable(bool & available)370 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
371 {
372 RTC_DCHECK(thread_checker_.CalledOnValidThread());
373 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
374
375 // Make an attempt to open up the
376 // output mixer corresponding to the currently selected output device.
377 if (!wasInitialized && InitSpeaker() == -1)
378 {
379 // If we end up here it means that the selected speaker has no volume
380 // control.
381 available = false;
382 return 0;
383 }
384
385 // Given that InitSpeaker was successful, we know volume control exists.
386 available = true;
387
388 // Close the initialized output mixer
389 if (!wasInitialized)
390 {
391 _mixerManager.CloseSpeaker();
392 }
393
394 return 0;
395 }
396
SetSpeakerVolume(uint32_t volume)397 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
398 {
399 RTC_DCHECK(thread_checker_.CalledOnValidThread());
400 if (!_playing) {
401 // Only update the volume if it's been set while we weren't playing.
402 update_speaker_volume_at_startup_ = true;
403 }
404 return (_mixerManager.SetSpeakerVolume(volume));
405 }
406
SpeakerVolume(uint32_t & volume) const407 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
408 {
409 RTC_DCHECK(thread_checker_.CalledOnValidThread());
410 uint32_t level(0);
411
412 if (_mixerManager.SpeakerVolume(level) == -1)
413 {
414 return -1;
415 }
416
417 volume = level;
418
419 return 0;
420 }
421
SetWaveOutVolume(uint16_t volumeLeft,uint16_t volumeRight)422 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
423 uint16_t volumeLeft,
424 uint16_t volumeRight)
425 {
426
427 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
428 " API call not supported on this platform");
429 return -1;
430 }
431
WaveOutVolume(uint16_t &,uint16_t &) const432 int32_t AudioDeviceLinuxPulse::WaveOutVolume(
433 uint16_t& /*volumeLeft*/,
434 uint16_t& /*volumeRight*/) const
435 {
436
437 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
438 " API call not supported on this platform");
439 return -1;
440 }
441
MaxSpeakerVolume(uint32_t & maxVolume) const442 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
443 uint32_t& maxVolume) const
444 {
445 RTC_DCHECK(thread_checker_.CalledOnValidThread());
446 uint32_t maxVol(0);
447
448 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
449 {
450 return -1;
451 }
452
453 maxVolume = maxVol;
454
455 return 0;
456 }
457
MinSpeakerVolume(uint32_t & minVolume) const458 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
459 uint32_t& minVolume) const
460 {
461 RTC_DCHECK(thread_checker_.CalledOnValidThread());
462 uint32_t minVol(0);
463
464 if (_mixerManager.MinSpeakerVolume(minVol) == -1)
465 {
466 return -1;
467 }
468
469 minVolume = minVol;
470
471 return 0;
472 }
473
SpeakerVolumeStepSize(uint16_t & stepSize) const474 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
475 uint16_t& stepSize) const
476 {
477 RTC_DCHECK(thread_checker_.CalledOnValidThread());
478 uint16_t delta(0);
479
480 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
481 {
482 return -1;
483 }
484
485 stepSize = delta;
486
487 return 0;
488 }
489
SpeakerMuteIsAvailable(bool & available)490 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
491 {
492 RTC_DCHECK(thread_checker_.CalledOnValidThread());
493 bool isAvailable(false);
494 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
495
496 // Make an attempt to open up the
497 // output mixer corresponding to the currently selected output device.
498 //
499 if (!wasInitialized && InitSpeaker() == -1)
500 {
501 // If we end up here it means that the selected speaker has no volume
502 // control, hence it is safe to state that there is no mute control
503 // already at this stage.
504 available = false;
505 return 0;
506 }
507
508 // Check if the selected speaker has a mute control
509 _mixerManager.SpeakerMuteIsAvailable(isAvailable);
510
511 available = isAvailable;
512
513 // Close the initialized output mixer
514 if (!wasInitialized)
515 {
516 _mixerManager.CloseSpeaker();
517 }
518
519 return 0;
520 }
521
SetSpeakerMute(bool enable)522 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
523 {
524 RTC_DCHECK(thread_checker_.CalledOnValidThread());
525 return (_mixerManager.SetSpeakerMute(enable));
526 }
527
SpeakerMute(bool & enabled) const528 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
529 {
530 RTC_DCHECK(thread_checker_.CalledOnValidThread());
531 bool muted(0);
532 if (_mixerManager.SpeakerMute(muted) == -1)
533 {
534 return -1;
535 }
536
537 enabled = muted;
538 return 0;
539 }
540
MicrophoneMuteIsAvailable(bool & available)541 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
542 {
543 RTC_DCHECK(thread_checker_.CalledOnValidThread());
544 bool isAvailable(false);
545 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
546
547 // Make an attempt to open up the
548 // input mixer corresponding to the currently selected input device.
549 //
550 if (!wasInitialized && InitMicrophone() == -1)
551 {
552 // If we end up here it means that the selected microphone has no
553 // volume control, hence it is safe to state that there is no
554 // boost control already at this stage.
555 available = false;
556 return 0;
557 }
558
559 // Check if the selected microphone has a mute control
560 //
561 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
562 available = isAvailable;
563
564 // Close the initialized input mixer
565 //
566 if (!wasInitialized)
567 {
568 _mixerManager.CloseMicrophone();
569 }
570
571 return 0;
572 }
573
SetMicrophoneMute(bool enable)574 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
575 {
576 RTC_DCHECK(thread_checker_.CalledOnValidThread());
577 return (_mixerManager.SetMicrophoneMute(enable));
578 }
579
MicrophoneMute(bool & enabled) const580 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
581 {
582 RTC_DCHECK(thread_checker_.CalledOnValidThread());
583 bool muted(0);
584 if (_mixerManager.MicrophoneMute(muted) == -1)
585 {
586 return -1;
587 }
588
589 enabled = muted;
590 return 0;
591 }
592
MicrophoneBoostIsAvailable(bool & available)593 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
594 {
595 RTC_DCHECK(thread_checker_.CalledOnValidThread());
596 bool isAvailable(false);
597 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
598
599 // Enumerate all avaliable microphone and make an attempt to open up the
600 // input mixer corresponding to the currently selected input device.
601 //
602 if (!wasInitialized && InitMicrophone() == -1)
603 {
604 // If we end up here it means that the selected microphone has no
605 // volume control, hence it is safe to state that there is no
606 // boost control already at this stage.
607 available = false;
608 return 0;
609 }
610
611 // Check if the selected microphone has a boost control
612 _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
613 available = isAvailable;
614
615 // Close the initialized input mixer
616 if (!wasInitialized)
617 {
618 _mixerManager.CloseMicrophone();
619 }
620
621 return 0;
622 }
623
SetMicrophoneBoost(bool enable)624 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
625 {
626 RTC_DCHECK(thread_checker_.CalledOnValidThread());
627 return (_mixerManager.SetMicrophoneBoost(enable));
628 }
629
MicrophoneBoost(bool & enabled) const630 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
631 {
632 RTC_DCHECK(thread_checker_.CalledOnValidThread());
633 bool onOff(0);
634
635 if (_mixerManager.MicrophoneBoost(onOff) == -1)
636 {
637 return -1;
638 }
639
640 enabled = onOff;
641
642 return 0;
643 }
644
StereoRecordingIsAvailable(bool & available)645 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
646 {
647 RTC_DCHECK(thread_checker_.CalledOnValidThread());
648 if (_recChannels == 2 && _recording) {
649 available = true;
650 return 0;
651 }
652
653 available = false;
654 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
655 int error = 0;
656
657 if (!wasInitialized && InitMicrophone() == -1)
658 {
659 // Cannot open the specified device
660 available = false;
661 return 0;
662 }
663
664 // Check if the selected microphone can record stereo.
665 bool isAvailable(false);
666 error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
667 if (!error)
668 available = isAvailable;
669
670 // Close the initialized input mixer
671 if (!wasInitialized)
672 {
673 _mixerManager.CloseMicrophone();
674 }
675
676 return error;
677 }
678
SetStereoRecording(bool enable)679 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
680 {
681 RTC_DCHECK(thread_checker_.CalledOnValidThread());
682 if (enable)
683 _recChannels = 2;
684 else
685 _recChannels = 1;
686
687 return 0;
688 }
689
StereoRecording(bool & enabled) const690 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
691 {
692 RTC_DCHECK(thread_checker_.CalledOnValidThread());
693 if (_recChannels == 2)
694 enabled = true;
695 else
696 enabled = false;
697
698 return 0;
699 }
700
StereoPlayoutIsAvailable(bool & available)701 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
702 {
703 RTC_DCHECK(thread_checker_.CalledOnValidThread());
704 if (_playChannels == 2 && _playing) {
705 available = true;
706 return 0;
707 }
708
709 available = false;
710 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
711 int error = 0;
712
713 if (!wasInitialized && InitSpeaker() == -1)
714 {
715 // Cannot open the specified device.
716 return -1;
717 }
718
719 // Check if the selected speaker can play stereo.
720 bool isAvailable(false);
721 error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
722 if (!error)
723 available = isAvailable;
724
725 // Close the initialized input mixer
726 if (!wasInitialized)
727 {
728 _mixerManager.CloseSpeaker();
729 }
730
731 return error;
732 }
733
SetStereoPlayout(bool enable)734 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
735 {
736 RTC_DCHECK(thread_checker_.CalledOnValidThread());
737 if (enable)
738 _playChannels = 2;
739 else
740 _playChannels = 1;
741
742 return 0;
743 }
744
StereoPlayout(bool & enabled) const745 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
746 {
747 RTC_DCHECK(thread_checker_.CalledOnValidThread());
748 if (_playChannels == 2)
749 enabled = true;
750 else
751 enabled = false;
752
753 return 0;
754 }
755
SetAGC(bool enable)756 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
757 {
758 CriticalSectionScoped lock(&_critSect);
759 _AGC = enable;
760
761 return 0;
762 }
763
AGC() const764 bool AudioDeviceLinuxPulse::AGC() const
765 {
766 CriticalSectionScoped lock(&_critSect);
767 return _AGC;
768 }
769
MicrophoneVolumeIsAvailable(bool & available)770 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
771 bool& available)
772 {
773 RTC_DCHECK(thread_checker_.CalledOnValidThread());
774 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
775
776 // Make an attempt to open up the
777 // input mixer corresponding to the currently selected output device.
778 if (!wasInitialized && InitMicrophone() == -1)
779 {
780 // If we end up here it means that the selected microphone has no
781 // volume control.
782 available = false;
783 return 0;
784 }
785
786 // Given that InitMicrophone was successful, we know that a volume control
787 // exists.
788 available = true;
789
790 // Close the initialized input mixer
791 if (!wasInitialized)
792 {
793 _mixerManager.CloseMicrophone();
794 }
795
796 return 0;
797 }
798
SetMicrophoneVolume(uint32_t volume)799 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
800 {
801 return (_mixerManager.SetMicrophoneVolume(volume));
802 }
803
MicrophoneVolume(uint32_t & volume) const804 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
805 uint32_t& volume) const
806 {
807
808 uint32_t level(0);
809
810 if (_mixerManager.MicrophoneVolume(level) == -1)
811 {
812 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
813 " failed to retrive current microphone level");
814 return -1;
815 }
816
817 volume = level;
818
819 return 0;
820 }
821
MaxMicrophoneVolume(uint32_t & maxVolume) const822 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
823 uint32_t& maxVolume) const
824 {
825
826 uint32_t maxVol(0);
827
828 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
829 {
830 return -1;
831 }
832
833 maxVolume = maxVol;
834
835 return 0;
836 }
837
MinMicrophoneVolume(uint32_t & minVolume) const838 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
839 uint32_t& minVolume) const
840 {
841
842 uint32_t minVol(0);
843
844 if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
845 {
846 return -1;
847 }
848
849 minVolume = minVol;
850
851 return 0;
852 }
853
MicrophoneVolumeStepSize(uint16_t & stepSize) const854 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
855 uint16_t& stepSize) const
856 {
857 RTC_DCHECK(thread_checker_.CalledOnValidThread());
858 uint16_t delta(0);
859
860 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
861 {
862 return -1;
863 }
864
865 stepSize = delta;
866
867 return 0;
868 }
869
// Enumerates PulseAudio sinks and returns how many playout devices exist,
// including the synthetic "default" entry at index 0. The PaSinkInfoCallback
// increments _numPlayDevices for each sink (and may also resolve a pending
// _deviceIndex lookup — see InitSpeaker/PlayoutDeviceName).
int16_t AudioDeviceLinuxPulse::PlayoutDevices()
{
    PaLock();

    pa_operation* paOperation = NULL;
    _numPlayDevices = 1; // init to 1 to account for "default"

    // get the whole list of devices and update _numPlayDevices
    paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
                                                      PaSinkInfoCallback,
                                                      this);

    // Block (with the mainloop lock released internally) until the callback
    // has seen every sink.
    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numPlayDevices;
}
888
SetPlayoutDevice(uint16_t index)889 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
890 {
891 RTC_DCHECK(thread_checker_.CalledOnValidThread());
892 if (_playIsInitialized)
893 {
894 return -1;
895 }
896
897 const uint16_t nDevices = PlayoutDevices();
898
899 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
900 " number of availiable output devices is %u", nDevices);
901
902 if (index > (nDevices - 1))
903 {
904 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
905 " device index is out of range [0,%u]", (nDevices - 1));
906 return -1;
907 }
908
909 _outputDeviceIndex = index;
910 _outputDeviceIsSpecified = true;
911
912 return 0;
913 }
914
SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType)915 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
916 AudioDeviceModule::WindowsDeviceType /*device*/)
917 {
918 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
919 "WindowsDeviceType not supported");
920 return -1;
921 }
922
PlayoutDeviceName(uint16_t index,char name[kAdmMaxDeviceNameSize],char guid[kAdmMaxGuidSize])923 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
924 uint16_t index,
925 char name[kAdmMaxDeviceNameSize],
926 char guid[kAdmMaxGuidSize])
927 {
928 RTC_DCHECK(thread_checker_.CalledOnValidThread());
929 const uint16_t nDevices = PlayoutDevices();
930
931 if ((index > (nDevices - 1)) || (name == NULL))
932 {
933 return -1;
934 }
935
936 memset(name, 0, kAdmMaxDeviceNameSize);
937
938 if (guid != NULL)
939 {
940 memset(guid, 0, kAdmMaxGuidSize);
941 }
942
943 // Check if default device
944 if (index == 0)
945 {
946 uint16_t deviceIndex = 0;
947 return GetDefaultDeviceInfo(false, name, deviceIndex);
948 }
949
950 // Tell the callback that we want
951 // The name for this device
952 _playDisplayDeviceName = name;
953 _deviceIndex = index;
954
955 // get playout devices
956 PlayoutDevices();
957
958 // clear device name and index
959 _playDisplayDeviceName = NULL;
960 _deviceIndex = -1;
961
962 return 0;
963 }
964
RecordingDeviceName(uint16_t index,char name[kAdmMaxDeviceNameSize],char guid[kAdmMaxGuidSize])965 int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
966 uint16_t index,
967 char name[kAdmMaxDeviceNameSize],
968 char guid[kAdmMaxGuidSize])
969 {
970 RTC_DCHECK(thread_checker_.CalledOnValidThread());
971 const uint16_t nDevices(RecordingDevices());
972
973 if ((index > (nDevices - 1)) || (name == NULL))
974 {
975 return -1;
976 }
977
978 memset(name, 0, kAdmMaxDeviceNameSize);
979
980 if (guid != NULL)
981 {
982 memset(guid, 0, kAdmMaxGuidSize);
983 }
984
985 // Check if default device
986 if (index == 0)
987 {
988 uint16_t deviceIndex = 0;
989 return GetDefaultDeviceInfo(true, name, deviceIndex);
990 }
991
992 // Tell the callback that we want
993 // the name for this device
994 _recDisplayDeviceName = name;
995 _deviceIndex = index;
996
997 // Get recording devices
998 RecordingDevices();
999
1000 // Clear device name and index
1001 _recDisplayDeviceName = NULL;
1002 _deviceIndex = -1;
1003
1004 return 0;
1005 }
1006
RecordingDevices()1007 int16_t AudioDeviceLinuxPulse::RecordingDevices()
1008 {
1009 PaLock();
1010
1011 pa_operation* paOperation = NULL;
1012 _numRecDevices = 1; // Init to 1 to account for "default"
1013
1014 // Get the whole list of devices and update _numRecDevices
1015 paOperation = LATE(pa_context_get_source_info_list)(_paContext,
1016 PaSourceInfoCallback,
1017 this);
1018
1019 WaitForOperationCompletion(paOperation);
1020
1021 PaUnLock();
1022
1023 return _numRecDevices;
1024 }
1025
SetRecordingDevice(uint16_t index)1026 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
1027 {
1028 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1029 if (_recIsInitialized)
1030 {
1031 return -1;
1032 }
1033
1034 const uint16_t nDevices(RecordingDevices());
1035
1036 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1037 " number of availiable input devices is %u", nDevices);
1038
1039 if (index > (nDevices - 1))
1040 {
1041 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1042 " device index is out of range [0,%u]", (nDevices - 1));
1043 return -1;
1044 }
1045
1046 _inputDeviceIndex = index;
1047 _inputDeviceIsSpecified = true;
1048
1049 return 0;
1050 }
1051
SetRecordingDevice(AudioDeviceModule::WindowsDeviceType)1052 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
1053 AudioDeviceModule::WindowsDeviceType /*device*/)
1054 {
1055 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1056 "WindowsDeviceType not supported");
1057 return -1;
1058 }
1059
PlayoutIsAvailable(bool & available)1060 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
1061 {
1062 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1063 available = false;
1064
1065 // Try to initialize the playout side
1066 int32_t res = InitPlayout();
1067
1068 // Cancel effect of initialization
1069 StopPlayout();
1070
1071 if (res != -1)
1072 {
1073 available = true;
1074 }
1075
1076 return res;
1077 }
1078
RecordingIsAvailable(bool & available)1079 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
1080 {
1081 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1082 available = false;
1083
1084 // Try to initialize the playout side
1085 int32_t res = InitRecording();
1086
1087 // Cancel effect of initialization
1088 StopRecording();
1089
1090 if (res != -1)
1091 {
1092 available = true;
1093 }
1094
1095 return res;
1096 }
1097
// Prepares the playout side: opens the speaker, creates the PulseAudio
// playback stream in S16LE at sample_rate_hz_, configures latency/buffer
// attributes and registers stream callbacks. Does not start playback.
// Returns 0 on success (idempotent once initialized), -1 on failure.
int32_t AudioDeviceLinuxPulse::InitPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    if (_playIsInitialized)
    {
        return 0;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        // Non-fatal: playout can proceed without mixer volume control.
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitSpeaker() failed");
    }

    // Set the play sample specification
    pa_sample_spec playSampleSpec;
    playSampleSpec.channels = _playChannels;
    playSampleSpec.format = PA_SAMPLE_S16LE;
    playSampleSpec.rate = sample_rate_hz_;

    // Create a new play stream
    _playStream = LATE(pa_stream_new)(_paContext, "playStream",
                                      &playSampleSpec, NULL);

    if (!_playStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to create play stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the playStream to the mixer
    _mixerManager.SetPlayStream(_playStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stream state %d\n",
                 LATE(pa_stream_get_state)(_playStream));

    // Set stream flags
    _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
        | PA_STREAM_INTERPOLATE_TIMING);

    if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that,
        // so we have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_playStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  pa_stream_get_sample_spec()");
            return -1;
        }

        // Target latency in bytes, derived from the stream's byte rate.
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec *
            WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
            WEBRTC_PA_MSECS_PER_SEC;

        // Set the play buffer attributes
        _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
        _playBufferAttr.tlength = latency; // target fill level of play buffer
        // minimum free num bytes before server request more data
        _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
        // prebuffer tlength before starting playout
        _playBufferAttr.prebuf = _playBufferAttr.tlength -
            _playBufferAttr.minreq;

        _configuredLatencyPlay = latency;
    }

    // num samples in bytes * num channels: one 10 ms chunk of S16 audio.
    _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
    _playbackBufferUnused = _playbackBufferSize;
    _playBuffer = new int8_t[_playbackBufferSize];

    // Enable underflow callback
    LATE(pa_stream_set_underflow_callback)(_playStream,
                                           PaStreamUnderflowCallback, this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_playStream,
                                       PaStreamStateCallback, this);

    // Mark playout side as initialized
    _playIsInitialized = true;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    return 0;
}
1219
// Creates and configures (but does not start) a PulseAudio capture stream.
// Returns 0 on success or if already initialized, -1 on failure.
// Must be called on the thread that created this object.
// NOTE(review): on the pa_stream_get_sample_spec() failure path below, the
// freshly created _recStream is not unreferenced here; it is only released
// by a later StopRecording() — confirm this is intended.
int32_t AudioDeviceLinuxPulse::InitRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    // Cannot (re)initialize while capture is running.
    if (_recording)
    {
        return -1;
    }

    // A capture device must have been selected first.
    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Already initialized - nothing more to do.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        // Non-fatal: continue with stream setup anyway.
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitMicrophone() failed");
    }

    // Set the rec sample specification: 16-bit little-endian PCM at the
    // server's native rate (sample_rate_hz_ was set from the server info).
    pa_sample_spec recSampleSpec;
    recSampleSpec.channels = _recChannels;
    recSampleSpec.format = PA_SAMPLE_S16LE;
    recSampleSpec.rate = sample_rate_hz_;

    // Create a new rec stream
    _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
                                     NULL);
    if (!_recStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to create rec stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
            | PA_STREAM_INTERPOLATE_TIMING);

        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that,
        // so we have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_recStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " pa_stream_get_sample_spec(rec)");
            return -1;
        }

        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        // Target capture latency in bytes.
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        // Set the rec buffer attributes
        // Note: fragsize specifies a maximum transfer size, not a minimum, so
        // it is not possible to force a high latency setting, only a low one.
        _recBufferAttr.fragsize = latency; // size of fragment
        _recBufferAttr.maxlength = latency + bytesPerSec
            * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        _configuredLatencyRec = latency;
    }

    // 10 ms of 16-bit (2-byte) samples per channel.
    _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
    _recordBufferUsed = 0;
    _recBuffer = new int8_t[_recordBufferSize];

    // Enable overflow callback
    LATE(pa_stream_set_overflow_callback)(_recStream,
                                          PaStreamOverflowCallback,
                                          this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_recStream,
                                       PaStreamStateCallback,
                                       this);

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
1331
StartRecording()1332 int32_t AudioDeviceLinuxPulse::StartRecording()
1333 {
1334 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1335 if (!_recIsInitialized)
1336 {
1337 return -1;
1338 }
1339
1340 if (_recording)
1341 {
1342 return 0;
1343 }
1344
1345 // Set state to ensure that the recording starts from the audio thread.
1346 _startRec = true;
1347
1348 // The audio thread will signal when recording has started.
1349 _timeEventRec.Set();
1350 if (kEventTimeout == _recStartEvent.Wait(10000))
1351 {
1352 {
1353 CriticalSectionScoped lock(&_critSect);
1354 _startRec = false;
1355 }
1356 StopRecording();
1357 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1358 " failed to activate recording");
1359 return -1;
1360 }
1361
1362 {
1363 CriticalSectionScoped lock(&_critSect);
1364 if (_recording)
1365 {
1366 // The recording state is set by the audio thread after recording
1367 // has started.
1368 } else
1369 {
1370 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1371 " failed to activate recording");
1372 return -1;
1373 }
1374 }
1375
1376 return 0;
1377 }
1378
// Stops capture, disconnects and releases the rec stream, and frees the
// intermediate capture buffer. Returns 0 on success or if recording was
// never initialized; -1 if the stream handle is missing or disconnect fails.
// Must be called on the thread that created this object.
int32_t AudioDeviceLinuxPulse::StopRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    if (!_recIsInitialized)
    {
        return 0;
    }

    if (_recStream == NULL)
    {
        return -1;
    }

    // Clear state flags first so the audio thread stops touching the stream.
    _recIsInitialized = false;
    _recording = false;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping recording");

    // Stop Recording
    PaLock();

    DisableReadCallback();
    LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect rec stream, err=%d\n",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected recording");
    }

    // Drop our reference; the stream object is gone after this.
    LATE(pa_stream_unref)(_recStream);
    _recStream = NULL;

    PaUnLock();

    // Provide the recStream to the mixer (now NULL, i.e. detach it).
    _mixerManager.SetRecStream(_recStream);

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }

    return 0;
}
1441
RecordingIsInitialized() const1442 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
1443 {
1444 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1445 return (_recIsInitialized);
1446 }
1447
Recording() const1448 bool AudioDeviceLinuxPulse::Recording() const
1449 {
1450 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1451 return (_recording);
1452 }
1453
PlayoutIsInitialized() const1454 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
1455 {
1456 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1457 return (_playIsInitialized);
1458 }
1459
// Requests the audio thread to start playout and waits (up to 10 s) for it
// to confirm. Returns 0 on success (or if already playing), -1 on failure.
// Must be called on the thread that created this object.
int32_t AudioDeviceLinuxPulse::StartPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    if (!_playIsInitialized)
    {
        return -1;
    }

    if (_playing)
    {
        return 0;
    }

    // Set state to ensure that playout starts from the audio thread.
    {
        CriticalSectionScoped lock(&_critSect);
        _startPlay = true;
    }

    // Both |_startPlay| and |_playing| need protection since they are also
    // accessed on the playout thread.

    // The audio thread will signal when playout has started.
    _timeEventPlay.Set();
    if (kEventTimeout == _playStartEvent.Wait(10000))
    {
        // Timed out: roll back the request and tear playout down.
        {
            CriticalSectionScoped lock(&_critSect);
            _startPlay = false;
        }
        StopPlayout();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to activate playout");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_playing)
        {
            // The playing state is set by the audio thread after playout
            // has started.
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to activate playing");
            return -1;
        }
    }

    return 0;
}
1513
// Stops playout, disconnects and releases the play stream, and frees the
// intermediate playout buffer. Returns 0 on success or if playout was never
// initialized; -1 if the stream handle is missing or disconnect fails.
// Must be called on the thread that created this object.
int32_t AudioDeviceLinuxPulse::StopPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    if (!_playIsInitialized)
    {
        return 0;
    }

    if (_playStream == NULL)
    {
        return -1;
    }

    // Clear state flags first so the audio thread stops touching the stream.
    _playIsInitialized = false;
    _playing = false;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping playback");

    // Stop Playout
    PaLock();

    DisableWriteCallback();
    LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected playback");
    }

    // Drop our reference; the stream object is gone after this.
    LATE(pa_stream_unref)(_playStream);
    _playStream = NULL;

    PaUnLock();

    // Provide the playStream to the mixer (now NULL, i.e. detach it).
    _mixerManager.SetPlayStream(_playStream);

    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }

    return 0;
}
1578
PlayoutDelay(uint16_t & delayMS) const1579 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
1580 {
1581 CriticalSectionScoped lock(&_critSect);
1582 delayMS = (uint16_t) _sndCardPlayDelay;
1583 return 0;
1584 }
1585
RecordingDelay(uint16_t & delayMS) const1586 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
1587 {
1588 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1589 delayMS = (uint16_t) _sndCardRecDelay;
1590 return 0;
1591 }
1592
Playing() const1593 bool AudioDeviceLinuxPulse::Playing() const
1594 {
1595 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1596 return (_playing);
1597 }
1598
SetPlayoutBuffer(const AudioDeviceModule::BufferType type,uint16_t sizeMS)1599 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1600 const AudioDeviceModule::BufferType type,
1601 uint16_t sizeMS)
1602 {
1603 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1604 if (type != AudioDeviceModule::kFixedBufferSize)
1605 {
1606 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1607 " Adaptive buffer size not supported on this platform");
1608 return -1;
1609 }
1610
1611 _playBufType = type;
1612 _playBufDelayFixed = sizeMS;
1613
1614 return 0;
1615 }
1616
PlayoutBuffer(AudioDeviceModule::BufferType & type,uint16_t & sizeMS) const1617 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1618 AudioDeviceModule::BufferType& type,
1619 uint16_t& sizeMS) const
1620 {
1621 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1622 type = _playBufType;
1623 sizeMS = _playBufDelayFixed;
1624
1625 return 0;
1626 }
1627
CPULoad(uint16_t &) const1628 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
1629 {
1630
1631 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1632 " API call not supported on this platform");
1633 return -1;
1634 }
1635
PlayoutWarning() const1636 bool AudioDeviceLinuxPulse::PlayoutWarning() const
1637 {
1638 CriticalSectionScoped lock(&_critSect);
1639 return (_playWarning > 0);
1640 }
1641
PlayoutError() const1642 bool AudioDeviceLinuxPulse::PlayoutError() const
1643 {
1644 CriticalSectionScoped lock(&_critSect);
1645 return (_playError > 0);
1646 }
1647
RecordingWarning() const1648 bool AudioDeviceLinuxPulse::RecordingWarning() const
1649 {
1650 CriticalSectionScoped lock(&_critSect);
1651 return (_recWarning > 0);
1652 }
1653
RecordingError() const1654 bool AudioDeviceLinuxPulse::RecordingError() const
1655 {
1656 CriticalSectionScoped lock(&_critSect);
1657 return (_recError > 0);
1658 }
1659
// Resets the pending playout-warning counter.
void AudioDeviceLinuxPulse::ClearPlayoutWarning()
{
    CriticalSectionScoped lock(&_critSect);
    _playWarning = 0;
}
1665
// Resets the pending playout-error counter.
void AudioDeviceLinuxPulse::ClearPlayoutError()
{
    CriticalSectionScoped lock(&_critSect);
    _playError = 0;
}
1671
// Resets the pending recording-warning counter.
void AudioDeviceLinuxPulse::ClearRecordingWarning()
{
    CriticalSectionScoped lock(&_critSect);
    _recWarning = 0;
}
1677
// Resets the pending recording-error counter.
void AudioDeviceLinuxPulse::ClearRecordingError()
{
    CriticalSectionScoped lock(&_critSect);
    _recError = 0;
}
1683
1684 // ============================================================================
1685 // Private Methods
1686 // ============================================================================
1687
PaContextStateCallback(pa_context * c,void * pThis)1688 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
1689 {
1690 static_cast<AudioDeviceLinuxPulse*> (pThis)->
1691 PaContextStateCallbackHandler(c);
1692 }
1693
1694 // ----------------------------------------------------------------------------
1695 // PaSinkInfoCallback
1696 // ----------------------------------------------------------------------------
1697
PaSinkInfoCallback(pa_context *,const pa_sink_info * i,int eol,void * pThis)1698 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
1699 const pa_sink_info *i, int eol,
1700 void *pThis)
1701 {
1702 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
1703 i, eol);
1704 }
1705
PaSourceInfoCallback(pa_context *,const pa_source_info * i,int eol,void * pThis)1706 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
1707 const pa_source_info *i,
1708 int eol, void *pThis)
1709 {
1710 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
1711 i, eol);
1712 }
1713
PaServerInfoCallback(pa_context *,const pa_server_info * i,void * pThis)1714 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
1715 const pa_server_info *i,
1716 void *pThis)
1717 {
1718 static_cast<AudioDeviceLinuxPulse*> (pThis)->
1719 PaServerInfoCallbackHandler(i);
1720 }
1721
PaStreamStateCallback(pa_stream * p,void * pThis)1722 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
1723 {
1724 static_cast<AudioDeviceLinuxPulse*> (pThis)->
1725 PaStreamStateCallbackHandler(p);
1726 }
1727
// Handles PA context state transitions. For terminal states (READY, FAILED,
// TERMINATED) it flags the change and signals the threaded mainloop so that
// InitPulseAudio(), which is blocked in pa_threaded_mainloop_wait(), wakes up.
void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " context state cb");

    pa_context_state_t state = LATE(pa_context_get_state)(c);
    switch (state)
    {
        case PA_CONTEXT_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            // Transient states - no action, just keep waiting.
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " no state");
            break;
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
        case PA_CONTEXT_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
    }
}
1761
PaSinkInfoCallbackHandler(const pa_sink_info * i,int eol)1762 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
1763 int eol)
1764 {
1765 if (eol)
1766 {
1767 // Signal that we are done
1768 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1769 return;
1770 }
1771
1772 if (_numPlayDevices == _deviceIndex)
1773 {
1774 // Convert the device index to the one of the sink
1775 _paDeviceIndex = i->index;
1776
1777 if (_playDeviceName)
1778 {
1779 // Copy the sink name
1780 strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
1781 _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1782 }
1783 if (_playDisplayDeviceName)
1784 {
1785 // Copy the sink display name
1786 strncpy(_playDisplayDeviceName, i->description,
1787 kAdmMaxDeviceNameSize);
1788 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1789 }
1790 }
1791
1792 _numPlayDevices++;
1793 }
1794
PaSourceInfoCallbackHandler(const pa_source_info * i,int eol)1795 void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
1796 const pa_source_info *i,
1797 int eol)
1798 {
1799 if (eol)
1800 {
1801 // Signal that we are done
1802 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1803 return;
1804 }
1805
1806 // We don't want to list output devices
1807 if (i->monitor_of_sink == PA_INVALID_INDEX)
1808 {
1809 if (_numRecDevices == _deviceIndex)
1810 {
1811 // Convert the device index to the one of the source
1812 _paDeviceIndex = i->index;
1813
1814 if (_recDeviceName)
1815 {
1816 // copy the source name
1817 strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
1818 _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1819 }
1820 if (_recDisplayDeviceName)
1821 {
1822 // Copy the source display name
1823 strncpy(_recDisplayDeviceName, i->description,
1824 kAdmMaxDeviceNameSize);
1825 _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1826 }
1827 }
1828
1829 _numRecDevices++;
1830 }
1831 }
1832
PaServerInfoCallbackHandler(const pa_server_info * i)1833 void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
1834 const pa_server_info *i)
1835 {
1836 // Use PA native sampling rate
1837 sample_rate_hz_ = i->sample_spec.rate;
1838
1839 // Copy the PA server version
1840 strncpy(_paServerVersion, i->server_version, 31);
1841 _paServerVersion[31] = '\0';
1842
1843 if (_recDisplayDeviceName)
1844 {
1845 // Copy the source name
1846 strncpy(_recDisplayDeviceName, i->default_source_name,
1847 kAdmMaxDeviceNameSize);
1848 _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1849 }
1850
1851 if (_playDisplayDeviceName)
1852 {
1853 // Copy the sink name
1854 strncpy(_playDisplayDeviceName, i->default_sink_name,
1855 kAdmMaxDeviceNameSize);
1856 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1857 }
1858
1859 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1860 }
1861
// Handles PA stream state transitions: traces the new state and always
// signals the threaded mainloop so any thread waiting on a stream state
// change wakes up.
void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stream state cb");

    pa_stream_state_t state = LATE(pa_stream_get_state)(p);
    switch (state)
    {
        case PA_STREAM_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_STREAM_CREATING:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " creating");
            break;
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            break;
        case PA_STREAM_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            break;
    }

    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1891
CheckPulseAudioVersion()1892 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
1893 {
1894 PaLock();
1895
1896 pa_operation* paOperation = NULL;
1897
1898 // get the server info and update deviceName
1899 paOperation = LATE(pa_context_get_server_info)(_paContext,
1900 PaServerInfoCallback,
1901 this);
1902
1903 WaitForOperationCompletion(paOperation);
1904
1905 PaUnLock();
1906
1907 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
1908 " checking PulseAudio version: %s", _paServerVersion);
1909
1910 return 0;
1911 }
1912
InitSamplingFrequency()1913 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
1914 {
1915 PaLock();
1916
1917 pa_operation* paOperation = NULL;
1918
1919 // Get the server info and update sample_rate_hz_
1920 paOperation = LATE(pa_context_get_server_info)(_paContext,
1921 PaServerInfoCallback,
1922 this);
1923
1924 WaitForOperationCompletion(paOperation);
1925
1926 PaUnLock();
1927
1928 return 0;
1929 }
1930
GetDefaultDeviceInfo(bool recDevice,char * name,uint16_t & index)1931 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
1932 char* name,
1933 uint16_t& index)
1934 {
1935 char tmpName[kAdmMaxDeviceNameSize] = {0};
1936 // subtract length of "default: "
1937 uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
1938 char* pName = NULL;
1939
1940 if (name)
1941 {
1942 // Add "default: "
1943 strcpy(name, "default: ");
1944 pName = &name[9];
1945 }
1946
1947 // Tell the callback that we want
1948 // the name for this device
1949 if (recDevice)
1950 {
1951 _recDisplayDeviceName = tmpName;
1952 } else
1953 {
1954 _playDisplayDeviceName = tmpName;
1955 }
1956
1957 // Set members
1958 _paDeviceIndex = -1;
1959 _deviceIndex = 0;
1960 _numPlayDevices = 0;
1961 _numRecDevices = 0;
1962
1963 PaLock();
1964
1965 pa_operation* paOperation = NULL;
1966
1967 // Get the server info and update deviceName
1968 paOperation = LATE(pa_context_get_server_info)(_paContext,
1969 PaServerInfoCallback,
1970 this);
1971
1972 WaitForOperationCompletion(paOperation);
1973
1974 // Get the device index
1975 if (recDevice)
1976 {
1977 paOperation
1978 = LATE(pa_context_get_source_info_by_name)(_paContext,
1979 (char *) tmpName,
1980 PaSourceInfoCallback,
1981 this);
1982 } else
1983 {
1984 paOperation
1985 = LATE(pa_context_get_sink_info_by_name)(_paContext,
1986 (char *) tmpName,
1987 PaSinkInfoCallback,
1988 this);
1989 }
1990
1991 WaitForOperationCompletion(paOperation);
1992
1993 PaUnLock();
1994
1995 // Set the index
1996 index = _paDeviceIndex;
1997
1998 if (name)
1999 {
2000 // Copy to name string
2001 strncpy(pName, tmpName, nameLen);
2002 }
2003
2004 // Clear members
2005 _playDisplayDeviceName = NULL;
2006 _recDisplayDeviceName = NULL;
2007 _paDeviceIndex = -1;
2008 _deviceIndex = -1;
2009 _numPlayDevices = 0;
2010 _numRecDevices = 0;
2011
2012 return 0;
2013 }
2014
// Loads libpulse via the late-binding symbol table, creates and starts the
// threaded mainloop, creates a context, connects it to the default server,
// waits for the connection to reach a terminal state, hands the objects to
// the mixer manager, and initializes version/sample-rate state.
// Returns 0 on success, -1 on any failure (with partial state left for
// TerminatePulseAudio() to clean up).
int32_t AudioDeviceLinuxPulse::InitPulseAudio()
{
    int retVal = 0;

    // Load libpulse
    if (!PaSymbolTable.Load())
    {
        // Most likely the Pulse library and sound server are not installed on
        // this system
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to load symbol table");
        return -1;
    }

    // Create a mainloop API and connection to the default server
    // the mainloop is the internal asynchronous API event loop
    if (_paMainloop) {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA mainloop has already existed");
        return -1;
    }
    _paMainloop = LATE(pa_threaded_mainloop_new)();
    if (!_paMainloop)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop");
        return -1;
    }

    // Start the threaded main loop
    retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to start main loop, error=%d", retVal);
        return -1;
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " mainloop running!");

    // All further PA calls must run with the mainloop locked.
    PaLock();

    _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
    if (!_paMainloopApi)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop API");
        PaUnLock();
        return -1;
    }

    // Create a new PulseAudio context
    if (_paContext){
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA context has already existed");
        PaUnLock();
        return -1;
    }
    _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

    if (!_paContext)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create context");
        PaUnLock();
        return -1;
    }

    // Set state callback function
    LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
                                        this);

    // Connect the context to a server (default)
    _paStateChanged = false;
    retVal = LATE(pa_context_connect)(_paContext,
                                      NULL,
                                      PA_CONTEXT_NOAUTOSPAWN,
                                      NULL);

    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to connect context, error=%d", retVal);
        PaUnLock();
        return -1;
    }

    // Wait for state change; PaContextStateCallbackHandler sets
    // _paStateChanged and signals the mainloop on a terminal state.
    while (!_paStateChanged)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    // Now check to see what final state we reached.
    pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

    if (state != PA_CONTEXT_READY)
    {
        if (state == PA_CONTEXT_FAILED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect to PulseAudio sound server");
        } else if (state == PA_CONTEXT_TERMINATED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " PulseAudio connection terminated early");
        } else
        {
            // Shouldn't happen, because we only signal on one of those three
            // states
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " unknown problem connecting to PulseAudio");
        }
        PaUnLock();
        return -1;
    }

    PaUnLock();

    // Give the objects to the mixer manager
    _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

    // Check the version
    if (CheckPulseAudioVersion() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PulseAudio version %s not supported",
                     _paServerVersion);
        return -1;
    }

    // Initialize sampling frequency
    if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to initialize sampling frequency,"
                     " set to %d Hz",
                     sample_rate_hz_);
        return -1;
    }

    return 0;
}
2159
TerminatePulseAudio()2160 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2161 {
2162 // Do nothing if the instance doesn't exist
2163 // likely PaSymbolTable.Load() fails
2164 if (!_paMainloop) {
2165 return 0;
2166 }
2167
2168 PaLock();
2169
2170 // Disconnect the context
2171 if (_paContext)
2172 {
2173 LATE(pa_context_disconnect)(_paContext);
2174 }
2175
2176 // Unreference the context
2177 if (_paContext)
2178 {
2179 LATE(pa_context_unref)(_paContext);
2180 }
2181
2182 PaUnLock();
2183 _paContext = NULL;
2184
2185 // Stop the threaded main loop
2186 if (_paMainloop)
2187 {
2188 LATE(pa_threaded_mainloop_stop)(_paMainloop);
2189 }
2190
2191 // Free the mainloop
2192 if (_paMainloop)
2193 {
2194 LATE(pa_threaded_mainloop_free)(_paMainloop);
2195 }
2196
2197 _paMainloop = NULL;
2198
2199 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2200 " PulseAudio terminated");
2201
2202 return 0;
2203 }
2204
// Acquires the threaded-mainloop lock; required around most PA API calls.
void AudioDeviceLinuxPulse::PaLock()
{
    LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
2209
// Releases the threaded-mainloop lock taken by PaLock().
void AudioDeviceLinuxPulse::PaUnLock()
{
    LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
2214
WaitForOperationCompletion(pa_operation * paOperation) const2215 void AudioDeviceLinuxPulse::WaitForOperationCompletion(
2216 pa_operation* paOperation) const
2217 {
2218 if (!paOperation)
2219 {
2220 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2221 "paOperation NULL in WaitForOperationCompletion");
2222 return;
2223 }
2224
2225 while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
2226 {
2227 LATE(pa_threaded_mainloop_wait)(_paMainloop);
2228 }
2229
2230 LATE(pa_operation_unref)(paOperation);
2231 }
2232
2233 // ============================================================================
2234 // Thread Methods
2235 // ============================================================================
2236
EnableWriteCallback()2237 void AudioDeviceLinuxPulse::EnableWriteCallback()
2238 {
2239 if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
2240 {
2241 // May already have available space. Must check.
2242 _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
2243 if (_tempBufferSpace > 0)
2244 {
2245 // Yup, there is already space available, so if we register a
2246 // write callback then it will not receive any event. So dispatch
2247 // one ourself instead.
2248 _timeEventPlay.Set();
2249 return;
2250 }
2251 }
2252
2253 LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
2254 this);
2255 }
2256
// Unregisters the playout write callback so Pulse stops invoking it.
void AudioDeviceLinuxPulse::DisableWriteCallback()
{
    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
}
2261
PaStreamWriteCallback(pa_stream *,size_t buffer_space,void * pThis)2262 void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
2263 size_t buffer_space,
2264 void *pThis)
2265 {
2266 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
2267 buffer_space);
2268 }
2269
PaStreamWriteCallbackHandler(size_t bufferSpace)2270 void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
2271 {
2272 _tempBufferSpace = bufferSpace;
2273
2274 // Since we write the data asynchronously on a different thread, we have
2275 // to temporarily disable the write callback or else Pulse will call it
2276 // continuously until we write the data. We re-enable it below.
2277 DisableWriteCallback();
2278 _timeEventPlay.Set();
2279 }
2280
PaStreamUnderflowCallback(pa_stream *,void * pThis)2281 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
2282 void *pThis)
2283 {
2284 static_cast<AudioDeviceLinuxPulse*> (pThis)->
2285 PaStreamUnderflowCallbackHandler();
2286 }
2287
// Reacts to a playout underflow by growing the configured target latency
// and pushing new buffer attributes to the server. No-op when we never
// configured a latency in the first place.
void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " Playout underflow");

    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // We didn't configure a pa_buffer_attr before, so switching to
        // one now would be questionable.
        return;
    }

    // Otherwise reconfigure the stream with a higher target latency.

    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
    if (!spec)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " pa_stream_get_sample_spec()");
        return;
    }

    // Grow the latency target by a fixed increment (in bytes).
    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
    uint32_t newLatency = _configuredLatencyPlay + bytesPerSec *
        WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
        WEBRTC_PA_MSECS_PER_SEC;

    // Set the play buffer attributes
    _playBufferAttr.maxlength = newLatency;
    _playBufferAttr.tlength = newLatency;
    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;

    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
                                                       &_playBufferAttr, NULL,
                                                       NULL);
    if (!op)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " pa_stream_set_buffer_attr()");
        return;
    }

    // Don't need to wait for this to complete.
    LATE(pa_operation_unref)(op);

    // Save the new latency in case we underflow again.
    _configuredLatencyPlay = newLatency;
}
2337
EnableReadCallback()2338 void AudioDeviceLinuxPulse::EnableReadCallback()
2339 {
2340 LATE(pa_stream_set_read_callback)(_recStream,
2341 &PaStreamReadCallback,
2342 this);
2343 }
2344
DisableReadCallback()2345 void AudioDeviceLinuxPulse::DisableReadCallback()
2346 {
2347 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
2348 }
2349
PaStreamReadCallback(pa_stream *,size_t,void * pThis)2350 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
2351 size_t /*unused2*/,
2352 void *pThis)
2353 {
2354 static_cast<AudioDeviceLinuxPulse*> (pThis)->
2355 PaStreamReadCallbackHandler();
2356 }
2357
PaStreamReadCallbackHandler()2358 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
2359 {
2360 // We get the data pointer and size now in order to save one Lock/Unlock
2361 // in the worker thread.
2362 if (LATE(pa_stream_peek)(_recStream,
2363 &_tempSampleData,
2364 &_tempSampleDataSize) != 0)
2365 {
2366 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2367 " Can't read data!");
2368 return;
2369 }
2370
2371 // PulseAudio record streams can have holes (for reasons not entirely clear
2372 // to the PA developers themselves). Since version 4 of PA, these are passed
2373 // over to the application (us), signaled by a non-zero sample data size
2374 // (the size of the hole) and a NULL sample data.
2375 // We handle stream holes as recommended by PulseAudio, i.e. by skipping
2376 // it, which is done with a stream drop.
2377 if (_tempSampleDataSize && !_tempSampleData) {
2378 LATE(pa_stream_drop)(_recStream);
2379 _tempSampleDataSize = 0; // reset
2380 return;
2381 }
2382
2383 // Since we consume the data asynchronously on a different thread, we have
2384 // to temporarily disable the read callback or else Pulse will call it
2385 // continuously until we consume the data. We re-enable it below.
2386 DisableReadCallback();
2387 _timeEventRec.Set();
2388 }
2389
PaStreamOverflowCallback(pa_stream *,void * pThis)2390 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
2391 void *pThis)
2392 {
2393 static_cast<AudioDeviceLinuxPulse*> (pThis)->
2394 PaStreamOverflowCallbackHandler();
2395 }
2396
PaStreamOverflowCallbackHandler()2397 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
2398 {
2399 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2400 " Recording overflow");
2401 }
2402
LatencyUsecs(pa_stream * stream)2403 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
2404 {
2405 if (!WEBRTC_PA_REPORT_LATENCY)
2406 {
2407 return 0;
2408 }
2409
2410 if (!stream)
2411 {
2412 return 0;
2413 }
2414
2415 pa_usec_t latency;
2416 int negative;
2417 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
2418 {
2419 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2420 " Can't query latency");
2421 // We'd rather continue playout/capture with an incorrect delay than
2422 // stop it altogether, so return a valid value.
2423 return 0;
2424 }
2425
2426 if (negative)
2427 {
2428 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2429 " warning: pa_stream_get_latency reported negative "
2430 "delay");
2431
2432 // The delay can be negative for monitoring streams if the captured
2433 // samples haven't been played yet. In such a case, "latency"
2434 // contains the magnitude, so we must negate it to get the real value.
2435 int32_t tmpLatency = (int32_t) -latency;
2436 if (tmpLatency < 0)
2437 {
2438 // Make sure that we don't use a negative delay.
2439 tmpLatency = 0;
2440 }
2441
2442 return tmpLatency;
2443 } else
2444 {
2445 return (int32_t) latency;
2446 }
2447 }
2448
// Slices a chunk of captured audio into full 10 ms frames and forwards each
// one to VoiceEngine via ProcessRecordedData(), buffering any leftover bytes
// in _recBuffer until enough arrive on a later call to complete a frame.
//
// bufferData - pointer to the captured bytes (advanced locally as frames
//              are consumed).
// bufferSize - number of bytes available at bufferData.
//
// Returns 0 on success, or -1 if recording was stopped while the samples
// were being delivered. Caller must hold _critSect.
int32_t AudioDeviceLinuxPulse::ReadRecordedData(
    const void* bufferData,
    size_t bufferSize) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    size_t size = bufferSize;
    // Samples per 10 ms frame; the "2 *" divisor assumes 2 bytes per sample.
    uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);

    // Account for the peeked data and the used data: stream latency in ms
    // plus 10 ms for every full frame still queued locally.
    uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));

    _sndCardRecDelay = recDelay;

    if (_playStream)
    {
        // Get the playout delay.
        _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
    }

    if (_recordBufferUsed > 0)
    {
        // A partial frame is pending from a previous call.
        // Have to copy to the buffer until it is full.
        size_t copy = _recordBufferSize - _recordBufferUsed;
        if (size < copy)
        {
            copy = size;
        }

        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
        _recordBufferUsed += copy;
        bufferData = static_cast<const char *> (bufferData) + copy;
        size -= copy;

        if (_recordBufferUsed != _recordBufferSize)
        {
            // Not enough data yet to pass to VoE.
            return 0;
        }

        // Provide data to VoiceEngine.
        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        _recordBufferUsed = 0;
    }

    // Now process full 10ms sample sets directly from the input.
    while (size >= _recordBufferSize)
    {
        // Provide data to VoiceEngine.
        // const_cast is needed because the audio buffer API takes a
        // non-const pointer even though the data is only read.
        if (ProcessRecordedData(
            static_cast<int8_t *> (const_cast<void *> (bufferData)),
            numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        bufferData = static_cast<const char *> (bufferData) +
            _recordBufferSize;
        size -= _recordBufferSize;

        // We have consumed 10ms of data.
        recDelay -= 10;
    }

    // Now save any leftovers for later.
    if (size > 0)
    {
        memcpy(_recBuffer, bufferData, size);
        _recordBufferUsed = size;
    }

    return 0;
}
2527
// Delivers one frame of recorded audio to VoiceEngine and, when AGC is on,
// applies any microphone-volume change requested in return.
//
// bufferData          - frame of samples handed to the audio buffer.
// bufferSizeInSamples - number of samples in the frame.
// recDelay            - capture delay in ms passed on for VQE.
//
// Returns 0 on success, or -1 if recording was stopped while _critSect was
// released around the delivery callback. Caller must hold _critSect.
int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay is
    // being correctly calculated here, but for legacy reasons we add +10 ms
    // to the value in the AEC. The real fix will be part of a larger
    // investigation into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
    // Deliver recorded samples at specified sample rate,
    // mic level etc. to the observer using callback.
    // The lock is released so the delivery callback cannot deadlock against
    // other code holding _critSect.
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again.
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         " AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id,
                             " the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}
2596
PlayThreadFunc(void * pThis)2597 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
2598 {
2599 return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
2600 }
2601
RecThreadFunc(void * pThis)2602 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
2603 {
2604 return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
2605 }
2606
// Worker thread body for playout. Waits for the write-callback signal, then
// either performs deferred stream start-up (when _startPlay is set) or feeds
// the stream: first any leftover of the previous frame, then a freshly
// requested frame from the AudioDeviceBuffer. Always returns true so the
// thread keeps running.
bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    // Block until the write callback signals available space, or 1 s passes.
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    CriticalSectionScoped lock(&_critSect);

    if (_startPlay)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        // NULL device name makes Pulse use the default sink below.
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            // Side effect: fills _playDeviceName for _deviceIndex.
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
            _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_) {
            pa_cvolume cVolumes;
            ptr_cvolume = &cVolumes;

            // Set the same volume for all channels
            const pa_sample_spec *spec =
                LATE(pa_stream_get_sample_spec)(_playStream);
            LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
            update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        // NOTE(review): a connect failure is only traced; the code still
        // waits for PA_STREAM_READY below.
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        // Wake any thread blocked waiting for playout to start.
        _playStartEvent.Set();

        return true;
    }

    if (_playing)
    {
        if (!_recording)
        {
            // Update the playout delay (the capture path updates it when
            // recording is active).
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        if (_playbackBufferUnused < _playbackBufferSize)
        {
            // Flush the tail of the previous frame that did not fit into
            // the stream last iteration.
            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                _playStream,
                (void *) &_playBuffer[_playbackBufferUnused],
                write, NULL, (int64_t) 0,
                PA_SEEK_RELATIVE) != PA_OK)
            {
                // Only report after repeated failures, to avoid flooding.
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                        kTraceError,
                        kTraceUtility,
                        _id,
                        " kPlayoutError message posted: "
                        "_writeErrors=%u, error=%d",
                        _writeErrors,
                        LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }

        // Samples per frame; the "2 *" divisor assumes 2 bytes per sample.
        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        // Might have been reduced to zero by the above.
        if (_tempBufferSpace > 0)
        {
            // Ask for new PCM data to be played out using the
            // AudioDeviceBuffer ensure that this callback is executed
            // without taking the audio-thread lock.
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again.
            if (!_playing)
            {
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, " invalid number of output samples(%d)",
                             nSamples);
            }

            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                // Same throttled error reporting as the flush above.
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                        kTraceError,
                        kTraceUtility,
                        _id,
                        " kPlayoutError message posted: "
                        "_writeErrors=%u, error=%d",
                        _writeErrors,
                        LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            // Record how much of the new frame made it into the stream; any
            // remainder is flushed on the next iteration.
            _playbackBufferUnused = write;
        }

        _tempBufferSpace = 0;
        PaLock();
        // Let Pulse signal us again when it wants more data.
        EnableWriteCallback();
        PaUnLock();

    } // _playing

    return true;
}
2847
// Worker thread body for capture. Waits for the read-callback signal, then
// either performs deferred stream start-up (when _startRec is set) or drains
// all readable data from the record stream into VoiceEngine. Always returns
// true so the thread keeps running.
bool AudioDeviceLinuxPulse::RecThreadProcess()
{
    // Block until the read callback signals pending data, or 1 s passes.
    switch (_timeEventRec.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    CriticalSectionScoped lock(&_critSect);

    if (_startRec)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startRec true, performing initial actions");

        // NULL device name makes Pulse use the default source below.
        _recDeviceName = NULL;

        // Set if not default device
        if (_inputDeviceIndex > 0)
        {
            // Get the recording device name
            _recDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _inputDeviceIndex;
            // Side effect: fills _recDeviceName for _deviceIndex.
            RecordingDevices();
        }

        PaLock();

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connecting stream");

        // Connect the stream to a source
        // NOTE(review): a connect failure is only traced; the code still
        // waits for PA_STREAM_READY below.
        if (LATE(pa_stream_connect_record)(_recStream,
                                           _recDeviceName,
                                           &_recBufferAttr,
                                           (pa_stream_flags_t) _recStreamFlags) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect rec stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " done");

        // We can now handle read callbacks
        EnableReadCallback();

        PaUnLock();

        // Clear device name
        if (_recDeviceName)
        {
            delete [] _recDeviceName;
            _recDeviceName = NULL;
        }

        _startRec = false;
        _recording = true;
        // Wake any thread blocked waiting for recording to start.
        _recStartEvent.Set();

        return true;
    }

    if (_recording)
    {
        // First consume the chunk the read callback already peeked (pointer
        // and size were stashed in _tempSampleData/_tempSampleDataSize).
        // Read data and provide it to VoiceEngine
        if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
        {
            return true;
        }

        _tempSampleData = NULL;
        _tempSampleDataSize = 0;

        PaLock();
        while (true)
        {
            // Ack the last thing we read
            if (LATE(pa_stream_drop)(_recStream) != 0)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, " failed to drop, err=%d\n",
                             LATE(pa_context_errno)(_paContext));
            }

            if (LATE(pa_stream_readable_size)(_recStream) <= 0)
            {
                // Then that was all the data
                break;
            }

            // Else more data.
            const void *sampleData;
            size_t sampleDataSize;

            if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
                != 0)
            {
                _recError = 1; // triggers callback from module process thread
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, " RECORD_ERROR message posted, error = %d",
                             LATE(pa_context_errno)(_paContext));
                break;
            }

            _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
                / 1000);

            // Drop lock for sigslot dispatch, which could take a while.
            PaUnLock();
            // Read data and provide it to VoiceEngine
            if (ReadRecordedData(sampleData, sampleDataSize) == -1)
            {
                return true;
            }
            PaLock();

            // Return to top of loop for the ack and the check for more data.
        }

        // All readable data consumed; let Pulse signal us again.
        EnableReadCallback();
        PaUnLock();

    } // _recording

    return true;
}
2991
KeyPressed() const2992 bool AudioDeviceLinuxPulse::KeyPressed() const{
2993
2994 #ifdef USE_X11
2995 char szKey[32];
2996 unsigned int i = 0;
2997 char state = 0;
2998
2999 if (!_XDisplay)
3000 return false;
3001
3002 // Check key map status
3003 XQueryKeymap(_XDisplay, szKey);
3004
3005 // A bit change in keymap means a key is pressed
3006 for (i = 0; i < sizeof(szKey); i++)
3007 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
3008
3009 // Save old state
3010 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
3011 return (state != 0);
3012 #else
3013 return false;
3014 #endif
3015 }
3016 }
3017