1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "modules/audio_device/mac/audio_device_mac.h"
12 
13 #include <ApplicationServices/ApplicationServices.h>
14 #include <libkern/OSAtomic.h>  // OSAtomicCompareAndSwap()
15 #include <mach/mach.h>         // mach_task_self()
16 #include <sys/sysctl.h>        // sysctlbyname()
17 
18 #include <memory>
19 
20 #include "modules/audio_device/audio_device_config.h"
21 #include "modules/third_party/portaudio/pa_ringbuffer.h"
22 #include "rtc_base/arraysize.h"
23 #include "rtc_base/checks.h"
24 #include "rtc_base/platform_thread.h"
25 #include "rtc_base/system/arch.h"
26 
27 namespace webrtc {
28 
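// The macros below evaluate |expr| into a local OSStatus |err|, which must be
// declared in the calling scope, and log failures via logCAMsg().
// WEBRTC_CA_RETURN_ON_ERR additionally makes the calling function return -1.
// Typical usage, mirroring the call sites later in this file:
//   OSStatus err = noErr;
//   WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));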
29 #define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
30   do {                                                               \
31     err = expr;                                                      \
32     if (err != noErr) {                                              \
33       logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
34       return -1;                                                     \
35     }                                                                \
36   } while (0)
37 
38 #define WEBRTC_CA_LOG_ERR(expr)                                      \
39   do {                                                               \
40     err = expr;                                                      \
41     if (err != noErr) {                                              \
42       logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
43     }                                                                \
44   } while (0)
45 
46 #define WEBRTC_CA_LOG_WARN(expr)                                       \
47   do {                                                                 \
48     err = expr;                                                        \
49     if (err != noErr) {                                                \
50       logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
51     }                                                                  \
52   } while (0)
53 
54 enum { MaxNumberDevices = 64 };
55 
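// Both helpers below spin on OSAtomicCompareAndSwap32Barrier() so that the
// store (AtomicSet32) and the load (AtomicGet32) each act as a full memory
// barrier; the compare-and-swap is simply retried until it succeeds.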
56 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
57   while (1) {
58     int32_t oldValue = *theValue;
59     if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) {
60       return;
61     }
62   }
63 }
64 
65 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) {
66   while (1) {
67     int32_t value = *theValue;
68     if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) {
69       return value;
70     }
71   }
72 }
73 
74 // CoreAudio errors are best interpreted as four character strings.
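// For example, kAudioHardwareBadDeviceError ('!dev') is logged as "!dev"; on
// little-endian hosts the byte order is reversed below before printing.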
75 void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
76                               const char* msg,
77                               const char* err) {
78   RTC_DCHECK(msg != NULL);
79   RTC_DCHECK(err != NULL);
80 
81 #ifdef WEBRTC_ARCH_BIG_ENDIAN
82   switch (sev) {
83     case rtc::LS_ERROR:
84       RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
85       break;
86     case rtc::LS_WARNING:
87       RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
88                           << err[3];
89       break;
90     case rtc::LS_VERBOSE:
91       RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2]
92                           << err[3];
93       break;
94     default:
95       break;
96   }
97 #else
98   // We need to flip the characters in this case.
99   switch (sev) {
100     case rtc::LS_ERROR:
101       RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
102       break;
103     case rtc::LS_WARNING:
104       RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
105                           << err[0];
106       break;
107     case rtc::LS_VERBOSE:
108       RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1]
109                           << err[0];
110       break;
111     default:
112       break;
113   }
114 #endif
115 }
116 
117 AudioDeviceMac::AudioDeviceMac()
118     : _ptrAudioBuffer(NULL),
119       _mixerManager(),
120       _inputDeviceIndex(0),
121       _outputDeviceIndex(0),
122       _inputDeviceID(kAudioObjectUnknown),
123       _outputDeviceID(kAudioObjectUnknown),
124       _inputDeviceIsSpecified(false),
125       _outputDeviceIsSpecified(false),
126       _recChannels(N_REC_CHANNELS),
127       _playChannels(N_PLAY_CHANNELS),
128       _captureBufData(NULL),
129       _renderBufData(NULL),
130       _initialized(false),
131       _isShutDown(false),
132       _recording(false),
133       _playing(false),
134       _recIsInitialized(false),
135       _playIsInitialized(false),
136       _renderDeviceIsAlive(1),
137       _captureDeviceIsAlive(1),
138       _twoDevices(true),
139       _doStop(false),
140       _doStopRec(false),
141       _macBookPro(false),
142       _macBookProPanRight(false),
143       _captureLatencyUs(0),
144       _renderLatencyUs(0),
145       _captureDelayUs(0),
146       _renderDelayUs(0),
147       _renderDelayOffsetSamples(0),
148       _paCaptureBuffer(NULL),
149       _paRenderBuffer(NULL),
150       _captureBufSizeSamples(0),
151       _renderBufSizeSamples(0),
152       prev_key_state_() {
153   RTC_LOG(LS_INFO) << __FUNCTION__ << " created";
154 
155   memset(_renderConvertData, 0, sizeof(_renderConvertData));
156   memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
157   memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
158   memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
159   memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
160 }
161 
162 AudioDeviceMac::~AudioDeviceMac() {
163   RTC_LOG(LS_INFO) << __FUNCTION__ << " destroyed";
164 
165   if (!_isShutDown) {
166     Terminate();
167   }
168 
169   RTC_DCHECK(!capture_worker_thread_.get());
170   RTC_DCHECK(!render_worker_thread_.get());
171 
172   if (_paRenderBuffer) {
173     delete _paRenderBuffer;
174     _paRenderBuffer = NULL;
175   }
176 
177   if (_paCaptureBuffer) {
178     delete _paCaptureBuffer;
179     _paCaptureBuffer = NULL;
180   }
181 
182   if (_renderBufData) {
183     delete[] _renderBufData;
184     _renderBufData = NULL;
185   }
186 
187   if (_captureBufData) {
188     delete[] _captureBufData;
189     _captureBufData = NULL;
190   }
191 
192   kern_return_t kernErr = KERN_SUCCESS;
193   kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
194   if (kernErr != KERN_SUCCESS) {
195     RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
196   }
197 
198   kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
199   if (kernErr != KERN_SUCCESS) {
200     RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
201   }
202 }
203 
204 // ============================================================================
205 //                                     API
206 // ============================================================================
207 
208 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
209   MutexLock lock(&mutex_);
210 
211   _ptrAudioBuffer = audioBuffer;
212 
213   // inform the AudioBuffer about default settings for this implementation
214   _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
215   _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
216   _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
217   _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
218 }
219 
220 int32_t AudioDeviceMac::ActiveAudioLayer(
221     AudioDeviceModule::AudioLayer& audioLayer) const {
222   audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
223   return 0;
224 }
225 
226 AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
227   MutexLock lock(&mutex_);
228 
229   if (_initialized) {
230     return InitStatus::OK;
231   }
232 
233   OSStatus err = noErr;
234 
235   _isShutDown = false;
236 
237   // PortAudio ring buffers require an elementCount which is a power of two.
238   if (_renderBufData == NULL) {
239     UInt32 powerOfTwo = 1;
240     while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
241       powerOfTwo <<= 1;
242     }
243     _renderBufSizeSamples = powerOfTwo;
244     _renderBufData = new SInt16[_renderBufSizeSamples];
245   }
246 
247   if (_paRenderBuffer == NULL) {
248     _paRenderBuffer = new PaUtilRingBuffer;
249     PaRingBufferSize bufSize = -1;
250     bufSize = PaUtil_InitializeRingBuffer(
251         _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
252     if (bufSize == -1) {
253       RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
254       return InitStatus::PLAYOUT_ERROR;
255     }
256   }
257 
258   if (_captureBufData == NULL) {
259     UInt32 powerOfTwo = 1;
260     while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
261       powerOfTwo <<= 1;
262     }
263     _captureBufSizeSamples = powerOfTwo;
264     _captureBufData = new Float32[_captureBufSizeSamples];
265   }
266 
267   if (_paCaptureBuffer == NULL) {
268     _paCaptureBuffer = new PaUtilRingBuffer;
269     PaRingBufferSize bufSize = -1;
270     bufSize =
271         PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
272                                     _captureBufSizeSamples, _captureBufData);
273     if (bufSize == -1) {
274       RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
275       return InitStatus::RECORDING_ERROR;
276     }
277   }
278 
279   kern_return_t kernErr = KERN_SUCCESS;
280   kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
281                              SYNC_POLICY_FIFO, 0);
282   if (kernErr != KERN_SUCCESS) {
283     RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
284     return InitStatus::OTHER_ERROR;
285   }
286 
287   kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
288                              SYNC_POLICY_FIFO, 0);
289   if (kernErr != KERN_SUCCESS) {
290     RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
291     return InitStatus::OTHER_ERROR;
292   }
293 
294   // Setting RunLoop to NULL here instructs HAL to manage its own thread for
295   // notifications. This was the default behaviour on OS X 10.5 and earlier,
296   // but now must be explicitly specified. HAL would otherwise try to use the
297   // main thread to issue notifications.
298   AudioObjectPropertyAddress propertyAddress = {
299       kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
300       kAudioObjectPropertyElementMaster};
301   CFRunLoopRef runLoop = NULL;
302   UInt32 size = sizeof(CFRunLoopRef);
303   int aoerr = AudioObjectSetPropertyData(
304       kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop);
305   if (aoerr != noErr) {
306     RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: "
307                       << (const char*)&aoerr;
308     return InitStatus::OTHER_ERROR;
309   }
310 
311   // Listen for any device changes.
312   propertyAddress.mSelector = kAudioHardwarePropertyDevices;
313   WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
314       kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
315 
316   // Determine if this is a MacBook Pro
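  // ("hw.model" reported by sysctlbyname() starts with "MacBookPro" on those
  // machines). InitPlayout() uses the flag to pan playout to the right channel
  // when the built-in speakers are active.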
317   _macBookPro = false;
318   _macBookProPanRight = false;
319   char buf[128];
320   size_t length = sizeof(buf);
321   memset(buf, 0, length);
322 
323   int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
324   if (intErr != 0) {
325     RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << intErr;
326   } else {
327     RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf;
328     if (strncmp(buf, "MacBookPro", 10) == 0) {
329       _macBookPro = true;
330     }
331   }
332 
333   _initialized = true;
334 
335   return InitStatus::OK;
336 }
337 
338 int32_t AudioDeviceMac::Terminate() {
339   if (!_initialized) {
340     return 0;
341   }
342 
343   if (_recording) {
344     RTC_LOG(LS_ERROR) << "Recording must be stopped";
345     return -1;
346   }
347 
348   if (_playing) {
349     RTC_LOG(LS_ERROR) << "Playback must be stopped";
350     return -1;
351   }
352 
353   MutexLock lock(&mutex_);
354   _mixerManager.Close();
355 
356   OSStatus err = noErr;
357   int retVal = 0;
358 
359   AudioObjectPropertyAddress propertyAddress = {
360       kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
361       kAudioObjectPropertyElementMaster};
362   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
363       kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
364 
365   err = AudioHardwareUnload();
366   if (err != noErr) {
367     logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
368              (const char*)&err);
369     retVal = -1;
370   }
371 
372   _isShutDown = true;
373   _initialized = false;
374   _outputDeviceIsSpecified = false;
375   _inputDeviceIsSpecified = false;
376 
377   return retVal;
378 }
379 
380 bool AudioDeviceMac::Initialized() const {
381   return (_initialized);
382 }
383 
384 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
385   MutexLock lock(&mutex_);
386   return SpeakerIsAvailableLocked(available);
387 }
388 
389 int32_t AudioDeviceMac::SpeakerIsAvailableLocked(bool& available) {
390   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
391 
392   // Make an attempt to open up the
393   // output mixer corresponding to the currently selected output device.
394   //
395   if (!wasInitialized && InitSpeakerLocked() == -1) {
396     available = false;
397     return 0;
398   }
399 
400   // Given that InitSpeaker was successful, we know that a valid speaker
401   // exists.
402   available = true;
403 
404   // Close the initialized output mixer
405   //
406   if (!wasInitialized) {
407     _mixerManager.CloseSpeaker();
408   }
409 
410   return 0;
411 }
412 
413 int32_t AudioDeviceMac::InitSpeaker() {
414   MutexLock lock(&mutex_);
415   return InitSpeakerLocked();
416 }
417 
418 int32_t AudioDeviceMac::InitSpeakerLocked() {
419   if (_playing) {
420     return -1;
421   }
422 
423   if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
424     return -1;
425   }
426 
427   if (_inputDeviceID == _outputDeviceID) {
428     _twoDevices = false;
429   } else {
430     _twoDevices = true;
431   }
432 
433   if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
434     return -1;
435   }
436 
437   return 0;
438 }
439 
440 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
441   MutexLock lock(&mutex_);
442   return MicrophoneIsAvailableLocked(available);
443 }
444 
445 int32_t AudioDeviceMac::MicrophoneIsAvailableLocked(bool& available) {
446   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
447 
448   // Make an attempt to open up the
449   // input mixer corresponding to the currently selected input device.
450   //
451   if (!wasInitialized && InitMicrophoneLocked() == -1) {
452     available = false;
453     return 0;
454   }
455 
456   // Given that InitMicrophone was successful, we know that a valid microphone
457   // exists.
458   available = true;
459 
460   // Close the initialized input mixer
461   //
462   if (!wasInitialized) {
463     _mixerManager.CloseMicrophone();
464   }
465 
466   return 0;
467 }
468 
469 int32_t AudioDeviceMac::InitMicrophone() {
470   MutexLock lock(&mutex_);
471   return InitMicrophoneLocked();
472 }
473 
474 int32_t AudioDeviceMac::InitMicrophoneLocked() {
475   if (_recording) {
476     return -1;
477   }
478 
479   if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
480     return -1;
481   }
482 
483   if (_inputDeviceID == _outputDeviceID) {
484     _twoDevices = false;
485   } else {
486     _twoDevices = true;
487   }
488 
489   if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
490     return -1;
491   }
492 
493   return 0;
494 }
495 
496 bool AudioDeviceMac::SpeakerIsInitialized() const {
497   return (_mixerManager.SpeakerIsInitialized());
498 }
499 
500 bool AudioDeviceMac::MicrophoneIsInitialized() const {
501   return (_mixerManager.MicrophoneIsInitialized());
502 }
503 
504 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
505   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
506 
507   // Make an attempt to open up the
508   // output mixer corresponding to the currently selected output device.
509   //
510   if (!wasInitialized && InitSpeaker() == -1) {
511     // If we end up here it means that the selected speaker has no volume
512     // control.
513     available = false;
514     return 0;
515   }
516 
517   // Given that InitSpeaker was successful, we know that a volume control exists
518   //
519   available = true;
520 
521   // Close the initialized output mixer
522   //
523   if (!wasInitialized) {
524     _mixerManager.CloseSpeaker();
525   }
526 
527   return 0;
528 }
529 
530 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
531   return (_mixerManager.SetSpeakerVolume(volume));
532 }
533 
534 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
535   uint32_t level(0);
536 
537   if (_mixerManager.SpeakerVolume(level) == -1) {
538     return -1;
539   }
540 
541   volume = level;
542   return 0;
543 }
544 
545 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
546   uint32_t maxVol(0);
547 
548   if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
549     return -1;
550   }
551 
552   maxVolume = maxVol;
553   return 0;
554 }
555 
556 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
557   uint32_t minVol(0);
558 
559   if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
560     return -1;
561   }
562 
563   minVolume = minVol;
564   return 0;
565 }
566 
567 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
568   bool isAvailable(false);
569   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
570 
571   // Make an attempt to open up the
572   // output mixer corresponding to the currently selected output device.
573   //
574   if (!wasInitialized && InitSpeaker() == -1) {
575     // If we end up here it means that the selected speaker has no volume
576     // control, hence it is safe to state that there is no mute control
577     // already at this stage.
578     available = false;
579     return 0;
580   }
581 
582   // Check if the selected speaker has a mute control
583   //
584   _mixerManager.SpeakerMuteIsAvailable(isAvailable);
585 
586   available = isAvailable;
587 
588   // Close the initialized output mixer
589   //
590   if (!wasInitialized) {
591     _mixerManager.CloseSpeaker();
592   }
593 
594   return 0;
595 }
596 
597 int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
598   return (_mixerManager.SetSpeakerMute(enable));
599 }
600 
601 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
602   bool muted(0);
603 
604   if (_mixerManager.SpeakerMute(muted) == -1) {
605     return -1;
606   }
607 
608   enabled = muted;
609   return 0;
610 }
611 
612 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
613   bool isAvailable(false);
614   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
615 
616   // Make an attempt to open up the
617   // input mixer corresponding to the currently selected input device.
618   //
619   if (!wasInitialized && InitMicrophone() == -1) {
620     // If we end up here it means that the selected microphone has no volume
621     // control, hence it is safe to state that there is no mute control
622     // already at this stage.
623     available = false;
624     return 0;
625   }
626 
627   // Check if the selected microphone has a mute control
628   //
629   _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
630   available = isAvailable;
631 
632   // Close the initialized input mixer
633   //
634   if (!wasInitialized) {
635     _mixerManager.CloseMicrophone();
636   }
637 
638   return 0;
639 }
640 
641 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
642   return (_mixerManager.SetMicrophoneMute(enable));
643 }
644 
645 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
646   bool muted(0);
647 
648   if (_mixerManager.MicrophoneMute(muted) == -1) {
649     return -1;
650   }
651 
652   enabled = muted;
653   return 0;
654 }
655 
656 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
657   bool isAvailable(false);
658   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
659 
660   if (!wasInitialized && InitMicrophone() == -1) {
661     // Cannot open the specified device
662     available = false;
663     return 0;
664   }
665 
666   // Check if the selected microphone can record stereo
667   //
668   _mixerManager.StereoRecordingIsAvailable(isAvailable);
669   available = isAvailable;
670 
671   // Close the initialized input mixer
672   //
673   if (!wasInitialized) {
674     _mixerManager.CloseMicrophone();
675   }
676 
677   return 0;
678 }
679 
680 int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
681   if (enable)
682     _recChannels = 2;
683   else
684     _recChannels = 1;
685 
686   return 0;
687 }
688 
689 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
690   if (_recChannels == 2)
691     enabled = true;
692   else
693     enabled = false;
694 
695   return 0;
696 }
697 
698 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
699   bool isAvailable(false);
700   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
701 
702   if (!wasInitialized && InitSpeaker() == -1) {
703     // Cannot open the specified device
704     available = false;
705     return 0;
706   }
707 
708   // Check if the selected speaker can play stereo
709   //
710   _mixerManager.StereoPlayoutIsAvailable(isAvailable);
711   available = isAvailable;
712 
713   // Close the initialized output mixer
714   //
715   if (!wasInitialized) {
716     _mixerManager.CloseSpeaker();
717   }
718 
719   return 0;
720 }
721 
722 int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
723   if (enable)
724     _playChannels = 2;
725   else
726     _playChannels = 1;
727 
728   return 0;
729 }
730 
731 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
732   if (_playChannels == 2)
733     enabled = true;
734   else
735     enabled = false;
736 
737   return 0;
738 }
739 
740 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
741   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
742 
743   // Make an attempt to open up the
744   // input mixer corresponding to the currently selected input device.
745   //
746   if (!wasInitialized && InitMicrophone() == -1) {
747     // If we end up here it means that the selected microphone has no volume
748     // control.
749     available = false;
750     return 0;
751   }
752 
753   // Given that InitMicrophone was successful, we know that a volume control
754   // exists
755   //
756   available = true;
757 
758   // Close the initialized input mixer
759   //
760   if (!wasInitialized) {
761     _mixerManager.CloseMicrophone();
762   }
763 
764   return 0;
765 }
766 
767 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
768   return (_mixerManager.SetMicrophoneVolume(volume));
769 }
770 
771 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
772   uint32_t level(0);
773 
774   if (_mixerManager.MicrophoneVolume(level) == -1) {
775     RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
776     return -1;
777   }
778 
779   volume = level;
780   return 0;
781 }
782 
783 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
784   uint32_t maxVol(0);
785 
786   if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
787     return -1;
788   }
789 
790   maxVolume = maxVol;
791   return 0;
792 }
793 
794 int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
795   uint32_t minVol(0);
796 
797   if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
798     return -1;
799   }
800 
801   minVolume = minVol;
802   return 0;
803 }
804 
805 int16_t AudioDeviceMac::PlayoutDevices() {
806   AudioDeviceID playDevices[MaxNumberDevices];
807   return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
808                           MaxNumberDevices);
809 }
810 
811 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
812   MutexLock lock(&mutex_);
813 
814   if (_playIsInitialized) {
815     return -1;
816   }
817 
818   AudioDeviceID playDevices[MaxNumberDevices];
819   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
820                                        playDevices, MaxNumberDevices);
821   RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
822                       << nDevices;
823 
824   if (index > (nDevices - 1)) {
825     RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
826                       << "]";
827     return -1;
828   }
829 
830   _outputDeviceIndex = index;
831   _outputDeviceIsSpecified = true;
832 
833   return 0;
834 }
835 
836 int32_t AudioDeviceMac::SetPlayoutDevice(
837     AudioDeviceModule::WindowsDeviceType /*device*/) {
838   RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
839   return -1;
840 }
841 
842 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
843                                           char name[kAdmMaxDeviceNameSize],
844                                           char guid[kAdmMaxGuidSize]) {
845   const uint16_t nDevices(PlayoutDevices());
846 
847   if ((index > (nDevices - 1)) || (name == NULL)) {
848     return -1;
849   }
850 
851   memset(name, 0, kAdmMaxDeviceNameSize);
852 
853   if (guid != NULL) {
854     memset(guid, 0, kAdmMaxGuidSize);
855   }
856 
857   return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
858 }
859 
860 int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
861                                             char name[kAdmMaxDeviceNameSize],
862                                             char guid[kAdmMaxGuidSize]) {
863   const uint16_t nDevices(RecordingDevices());
864 
865   if ((index > (nDevices - 1)) || (name == NULL)) {
866     return -1;
867   }
868 
869   memset(name, 0, kAdmMaxDeviceNameSize);
870 
871   if (guid != NULL) {
872     memset(guid, 0, kAdmMaxGuidSize);
873   }
874 
875   return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
876 }
877 
878 int16_t AudioDeviceMac::RecordingDevices() {
879   AudioDeviceID recDevices[MaxNumberDevices];
880   return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
881                           MaxNumberDevices);
882 }
883 
884 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
885   if (_recIsInitialized) {
886     return -1;
887   }
888 
889   AudioDeviceID recDevices[MaxNumberDevices];
890   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
891                                        recDevices, MaxNumberDevices);
892   RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is "
893                       << nDevices;
894 
895   if (index > (nDevices - 1)) {
896     RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
897                       << "]";
898     return -1;
899   }
900 
901   _inputDeviceIndex = index;
902   _inputDeviceIsSpecified = true;
903 
904   return 0;
905 }
906 
907 int32_t AudioDeviceMac::SetRecordingDevice(
908     AudioDeviceModule::WindowsDeviceType /*device*/) {
909   RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
910   return -1;
911 }
912 
913 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
914   available = true;
915 
916   // Try to initialize the playout side
917   if (InitPlayout() == -1) {
918     available = false;
919   }
920 
921   // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
922   // We must actually start playout here in order to have the IOProc
923   // deleted by calling StopPlayout().
924   if (StartPlayout() == -1) {
925     available = false;
926   }
927 
928   // Cancel effect of initialization
929   if (StopPlayout() == -1) {
930     available = false;
931   }
932 
933   return 0;
934 }
935 
936 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
937   available = true;
938 
939   // Try to initialize the recording side
940   if (InitRecording() == -1) {
941     available = false;
942   }
943 
944   // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
945   // We must actually start recording here in order to have the IOProc
946   // deleted by calling StopRecording().
947   if (StartRecording() == -1) {
948     available = false;
949   }
950 
951   // Cancel effect of initialization
952   if (StopRecording() == -1) {
953     available = false;
954   }
955 
956   return 0;
957 }
958 
959 int32_t AudioDeviceMac::InitPlayout() {
960   RTC_LOG(LS_INFO) << "InitPlayout";
961   MutexLock lock(&mutex_);
962 
963   if (_playing) {
964     return -1;
965   }
966 
967   if (!_outputDeviceIsSpecified) {
968     return -1;
969   }
970 
971   if (_playIsInitialized) {
972     return 0;
973   }
974 
975   // Initialize the speaker (devices might have been added or removed)
976   if (InitSpeakerLocked() == -1) {
977     RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
978   }
979 
980   if (!MicrophoneIsInitialized()) {
981     // Make this call to check if we are using
982     // one or two devices (_twoDevices)
983     bool available = false;
984     if (MicrophoneIsAvailableLocked(available) == -1) {
985       RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed";
986     }
987   }
988 
989   PaUtil_FlushRingBuffer(_paRenderBuffer);
990 
991   OSStatus err = noErr;
992   UInt32 size = 0;
993   _renderDelayOffsetSamples = 0;
994   _renderDelayUs = 0;
995   _renderLatencyUs = 0;
996   _renderDeviceIsAlive = 1;
997   _doStop = false;
998 
999   // The internal microphone of a MacBook Pro is located under the left speaker
1000   // grille. When the internal speakers are in use, we want to fully stereo
1001   // pan to the right.
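  // kAudioDevicePropertyDataSource reports which output path is active;
  // 'ispk' (checked below) corresponds to the built-in speakers.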
1002   AudioObjectPropertyAddress propertyAddress = {
1003       kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
1004   if (_macBookPro) {
1005     _macBookProPanRight = false;
1006     Boolean hasProperty =
1007         AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
1008     if (hasProperty) {
1009       UInt32 dataSource = 0;
1010       size = sizeof(dataSource);
1011       WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
1012           _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));
1013 
1014       if (dataSource == 'ispk') {
1015         _macBookProPanRight = true;
1016         RTC_LOG(LS_VERBOSE)
1017             << "MacBook Pro using internal speakers; stereo panning right";
1018       } else {
1019         RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
1020       }
1021 
1022       // Add a listener to determine if the status changes.
1023       WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1024           _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1025     }
1026   }
1027 
1028   // Get current stream description
1029   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1030   memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
1031   size = sizeof(_outStreamFormat);
1032   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1033       _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
1034 
1035   if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1036     logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
1037              (const char*)&_outStreamFormat.mFormatID);
1038     return -1;
1039   }
1040 
1041   if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1042     RTC_LOG(LS_ERROR)
1043         << "Too many channels on output device (mChannelsPerFrame = "
1044         << _outStreamFormat.mChannelsPerFrame << ")";
1045     return -1;
1046   }
1047 
1048   if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
1049     RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported. "
1050                          "AudioHardware streams should not have this format.";
1051     return -1;
1052   }
1053 
1054   RTC_LOG(LS_VERBOSE) << "Output stream format:";
1055   RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
1056                       << ", mChannelsPerFrame = "
1057                       << _outStreamFormat.mChannelsPerFrame;
1058   RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = "
1059                       << _outStreamFormat.mBytesPerPacket
1060                       << ", mFramesPerPacket = "
1061                       << _outStreamFormat.mFramesPerPacket;
1062   RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
1063                       << ", mBitsPerChannel = "
1064                       << _outStreamFormat.mBitsPerChannel;
1065   RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
1066   logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1067            (const char*)&_outStreamFormat.mFormatID);
1068 
1069   // Our preferred format to work with.
1070   if (_outStreamFormat.mChannelsPerFrame < 2) {
1071     // Disable stereo playout when we only have one channel on the device.
1072     _playChannels = 1;
1073     RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
1074   }
1075   WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
1076 
1077   // Listen for format changes.
1078   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1079   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1080       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1081 
1082   // Listen for processor overloads.
1083   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1084   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1085       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1086 
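  // Create a render IOProc unless the shared device already received one in
  // InitRecording(); with two separate devices the output always needs its own.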
1087   if (_twoDevices || !_recIsInitialized) {
1088     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1089         _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1090   }
1091 
1092   _playIsInitialized = true;
1093 
1094   return 0;
1095 }
1096 
1097 int32_t AudioDeviceMac::InitRecording() {
1098   RTC_LOG(LS_INFO) << "InitRecording";
1099   MutexLock lock(&mutex_);
1100 
1101   if (_recording) {
1102     return -1;
1103   }
1104 
1105   if (!_inputDeviceIsSpecified) {
1106     return -1;
1107   }
1108 
1109   if (_recIsInitialized) {
1110     return 0;
1111   }
1112 
1113   // Initialize the microphone (devices might have been added or removed)
1114   if (InitMicrophoneLocked() == -1) {
1115     RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
1116   }
1117 
1118   if (!SpeakerIsInitialized()) {
1119     // Make this call to check if we are using
1120     // one or two devices (_twoDevices)
1121     bool available = false;
1122     if (SpeakerIsAvailableLocked(available) == -1) {
1123       RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
1124     }
1125   }
1126 
1127   OSStatus err = noErr;
1128   UInt32 size = 0;
1129 
1130   PaUtil_FlushRingBuffer(_paCaptureBuffer);
1131 
1132   _captureDelayUs = 0;
1133   _captureLatencyUs = 0;
1134   _captureDeviceIsAlive = 1;
1135   _doStopRec = false;
1136 
1137   // Get current stream description
1138   AudioObjectPropertyAddress propertyAddress = {
1139       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1140   memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
1141   size = sizeof(_inStreamFormat);
1142   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1143       _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
1144 
1145   if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1146     logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
1147              (const char*)&_inStreamFormat.mFormatID);
1148     return -1;
1149   }
1150 
1151   if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1152     RTC_LOG(LS_ERROR)
1153         << "Too many channels on input device (mChannelsPerFrame = "
1154         << _inStreamFormat.mChannelsPerFrame << ")";
1155     return -1;
1156   }
1157 
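  // One IO block is N_BLOCKS_IO * 10 ms of audio at the device sample rate,
  // counted across all channels; it must fit in the capture ring buffer.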
1158   const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
1159                                     _inStreamFormat.mSampleRate / 100 *
1160                                     N_BLOCKS_IO;
1161   if (io_block_size_samples > _captureBufSizeSamples) {
1162     RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
1163                       << ") is larger than ring buffer ("
1164                       << _captureBufSizeSamples << ")";
1165     return -1;
1166   }
1167 
1168   RTC_LOG(LS_VERBOSE) << "Input stream format:";
1169   RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
1170                       << ", mChannelsPerFrame = "
1171                       << _inStreamFormat.mChannelsPerFrame;
1172   RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
1173                       << ", mFramesPerPacket = "
1174                       << _inStreamFormat.mFramesPerPacket;
1175   RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
1176                       << ", mBitsPerChannel = "
1177                       << _inStreamFormat.mBitsPerChannel;
1178   RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
1179   logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1180            (const char*)&_inStreamFormat.mFormatID);
1181 
1182   // Our preferred format to work with
1183   if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
1184     _inDesiredFormat.mChannelsPerFrame = 2;
1185   } else {
1186     // Disable stereo recording when we only have one channel on the device.
1187     _inDesiredFormat.mChannelsPerFrame = 1;
1188     _recChannels = 1;
1189     RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
1190   }
1191 
1192   if (_ptrAudioBuffer) {
1193     // Update audio buffer with the selected parameters
1194     _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
1195     _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
1196   }
1197 
1198   _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
1199   _inDesiredFormat.mBytesPerPacket =
1200       _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1201   _inDesiredFormat.mFramesPerPacket = 1;
1202   _inDesiredFormat.mBytesPerFrame =
1203       _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1204   _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1205 
1206   _inDesiredFormat.mFormatFlags =
1207       kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
1208 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1209   _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1210 #endif
1211   _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
1212 
1213   WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
1214                                             &_captureConverter));
1215 
1216   // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
1217   // TODO(xians): investigate this block.
1218   UInt32 bufByteCount =
1219       (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
1220                _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
1221   if (_inStreamFormat.mFramesPerPacket != 0) {
1222     if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
1223       bufByteCount =
1224           ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
1225           _inStreamFormat.mFramesPerPacket;
1226     }
1227   }
1228 
1229   // Ensure the buffer size is within the acceptable range provided by the
1230   // device.
1231   propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1232   AudioValueRange range;
1233   size = sizeof(range);
1234   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1235       _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
1236   if (range.mMinimum > bufByteCount) {
1237     bufByteCount = range.mMinimum;
1238   } else if (range.mMaximum < bufByteCount) {
1239     bufByteCount = range.mMaximum;
1240   }
1241 
1242   propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1243   size = sizeof(bufByteCount);
1244   WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
1245       _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
1246 
1247   // Get capture device latency
1248   propertyAddress.mSelector = kAudioDevicePropertyLatency;
1249   UInt32 latency = 0;
1250   size = sizeof(UInt32);
1251   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1252       _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
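  // The latency property is reported in frames; convert to microseconds using
  // the device sample rate.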
1253   _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1254 
1255   // Get capture stream latency
1256   propertyAddress.mSelector = kAudioDevicePropertyStreams;
1257   AudioStreamID stream = 0;
1258   size = sizeof(AudioStreamID);
1259   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1260       _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
1261   propertyAddress.mSelector = kAudioStreamPropertyLatency;
1262   size = sizeof(UInt32);
1263   latency = 0;
1264   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1265       _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1266   _captureLatencyUs +=
1267       (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1268 
1269   // Listen for format changes
1270   // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
1271   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1272   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1273       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1274 
1275   // Listen for processor overloads
1276   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1277   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1278       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1279 
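  // With separate capture and render devices the capture side gets a dedicated
  // IOProc; with a shared device a single IOProc is created here only if
  // InitPlayout() has not already registered it.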
1280   if (_twoDevices) {
1281     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1282         _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
1283   } else if (!_playIsInitialized) {
1284     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1285         _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1286   }
1287 
1288   // Mark recording side as initialized
1289   _recIsInitialized = true;
1290 
1291   return 0;
1292 }
1293 
1294 int32_t AudioDeviceMac::StartRecording() {
1295   RTC_LOG(LS_INFO) << "StartRecording";
1296   MutexLock lock(&mutex_);
1297 
1298   if (!_recIsInitialized) {
1299     return -1;
1300   }
1301 
1302   if (_recording) {
1303     return 0;
1304   }
1305 
1306   if (!_initialized) {
1307     RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
1308     return -1;
1309   }
1310 
1311   RTC_DCHECK(!capture_worker_thread_.get());
1312   capture_worker_thread_.reset(new rtc::PlatformThread(
1313       RunCapture, this, "CaptureWorkerThread", rtc::kRealtimePriority));
1314   RTC_DCHECK(capture_worker_thread_.get());
1315   capture_worker_thread_->Start();
1316 
1317   OSStatus err = noErr;
1318   if (_twoDevices) {
1319     WEBRTC_CA_RETURN_ON_ERR(
1320         AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
1321   } else if (!_playing) {
1322     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
1323   }
1324 
1325   _recording = true;
1326 
1327   return 0;
1328 }
1329 
1330 int32_t AudioDeviceMac::StopRecording() {
1331   RTC_LOG(LS_INFO) << "StopRecording";
1332   MutexLock lock(&mutex_);
1333 
1334   if (!_recIsInitialized) {
1335     return 0;
1336   }
1337 
1338   OSStatus err = noErr;
1339   int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
1340   if (_twoDevices && captureDeviceIsAlive == 1) {
1341     // Recording side uses its own dedicated device and IOProc.
1342     if (_recording) {
1343       _recording = false;
1344       _doStopRec = true;  // Signal to io proc to stop audio device
1345       mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1346       if (!_stopEventRec.Wait(2000)) {
1347         MutexLock lockScoped(&mutex_);
1348         RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc. "
1349                                "We may have failed to detect a device removal.";
1350         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
1351         WEBRTC_CA_LOG_WARN(
1352             AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1353       }
1354       mutex_.Lock();
1355       _doStopRec = false;
1356       RTC_LOG(LS_INFO) << "Recording stopped (input device)";
1357     } else if (_recIsInitialized) {
1358       WEBRTC_CA_LOG_WARN(
1359           AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1360       RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
1361     }
1362   } else {
1363     // We signal a stop for a shared device even when rendering has
1364     // not yet ended. This is to ensure the IOProc will return early as
1365     // intended (by checking |_recording|) before accessing
1366     // resources we free below (e.g. the capture converter).
1367     //
1368     // In the case of a shared device, the IOProc will verify
1369     // rendering has ended before stopping itself.
1370     if (_recording && captureDeviceIsAlive == 1) {
1371       _recording = false;
1372       _doStop = true;     // Signal to io proc to stop audio device
1373       mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1374       if (!_stopEvent.Wait(2000)) {
1375         MutexLock lockScoped(&mutex_);
1376         RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc. "
1377                                "We may have failed to detect a device removal.";
1378         // We assume rendering on a shared device has stopped as well if
1379         // the IOProc times out.
1380         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1381         WEBRTC_CA_LOG_WARN(
1382             AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1383       }
1384       mutex_.Lock();
1385       _doStop = false;
1386       RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
1387     } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
1388       WEBRTC_CA_LOG_WARN(
1389           AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1390       RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
1391     }
1392   }
1393 
1394   // Setting this signal will allow the worker thread to be stopped.
1395   AtomicSet32(&_captureDeviceIsAlive, 0);
1396 
1397   if (capture_worker_thread_.get()) {
1398     mutex_.Unlock();
1399     capture_worker_thread_->Stop();
1400     capture_worker_thread_.reset();
1401     mutex_.Lock();
1402   }
1403 
1404   WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
1405 
1406   // Remove listeners.
1407   AudioObjectPropertyAddress propertyAddress = {
1408       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1409   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1410       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1411 
1412   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1413   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1414       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1415 
1416   _recIsInitialized = false;
1417   _recording = false;
1418 
1419   return 0;
1420 }
1421 
1422 bool AudioDeviceMac::RecordingIsInitialized() const {
1423   return (_recIsInitialized);
1424 }
1425 
1426 bool AudioDeviceMac::Recording() const {
1427   return (_recording);
1428 }
1429 
1430 bool AudioDeviceMac::PlayoutIsInitialized() const {
1431   return (_playIsInitialized);
1432 }
1433 
1434 int32_t AudioDeviceMac::StartPlayout() {
1435   RTC_LOG(LS_INFO) << "StartPlayout";
1436   MutexLock lock(&mutex_);
1437 
1438   if (!_playIsInitialized) {
1439     return -1;
1440   }
1441 
1442   if (_playing) {
1443     return 0;
1444   }
1445 
1446   RTC_DCHECK(!render_worker_thread_.get());
1447   render_worker_thread_.reset(new rtc::PlatformThread(
1448       RunRender, this, "RenderWorkerThread", rtc::kRealtimePriority));
1449   render_worker_thread_->Start();
1450 
1451   if (_twoDevices || !_recording) {
1452     OSStatus err = noErr;
1453     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
1454   }
1455   _playing = true;
1456 
1457   return 0;
1458 }
1459 
1460 int32_t AudioDeviceMac::StopPlayout() {
1461   RTC_LOG(LS_INFO) << "StopPlayout";
1462   MutexLock lock(&mutex_);
1463 
1464   if (!_playIsInitialized) {
1465     return 0;
1466   }
1467 
1468   OSStatus err = noErr;
1469   int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
1470   if (_playing && renderDeviceIsAlive == 1) {
1471     // We signal a stop for a shared device even when capturing has not
1472     // yet ended. This is to ensure the IOProc will return early as
1473     // intended (by checking |_playing|) before accessing resources we
1474     // free below (e.g. the render converter).
1475     //
1476     // In the case of a shared device, the IOProc will verify capturing
1477     // has ended before stopping itself.
1478     _playing = false;
1479     _doStop = true;     // Signal to io proc to stop audio device
1480     mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1481     if (!_stopEvent.Wait(2000)) {
1482       MutexLock lockScoped(&mutex_);
1483       RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc. "
1484                              "We may have failed to detect a device removal.";
1485 
1486       // We assume capturing on a shared device has stopped as well if the
1487       // IOProc times out.
1488       WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1489       WEBRTC_CA_LOG_WARN(
1490           AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1491     }
1492     mutex_.Lock();
1493     _doStop = false;
1494     RTC_LOG(LS_INFO) << "Playout stopped";
1495   } else if (_twoDevices && _playIsInitialized) {
1496     WEBRTC_CA_LOG_WARN(
1497         AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1498     RTC_LOG(LS_INFO) << "Playout uninitialized (output device)";
1499   } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
1500     WEBRTC_CA_LOG_WARN(
1501         AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1502     RTC_LOG(LS_INFO) << "Playout uninitialized (shared device)";
1503   }
1504 
1505   // Setting this signal will allow the worker thread to be stopped.
1506   AtomicSet32(&_renderDeviceIsAlive, 0);
1507   if (render_worker_thread_.get()) {
1508     mutex_.Unlock();
1509     render_worker_thread_->Stop();
1510     render_worker_thread_.reset();
1511     mutex_.Lock();
1512   }
1513 
1514   WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
1515 
1516   // Remove listeners.
1517   AudioObjectPropertyAddress propertyAddress = {
1518       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
1519   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1520       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1521 
1522   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1523   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1524       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1525 
1526   if (_macBookPro) {
1527     Boolean hasProperty =
1528         AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
1529     if (hasProperty) {
1530       propertyAddress.mSelector = kAudioDevicePropertyDataSource;
1531       WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1532           _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1533     }
1534   }
1535 
1536   _playIsInitialized = false;
1537   _playing = false;
1538 
1539   return 0;
1540 }
1541 
1542 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
1543   int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
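  // Convert the sum of the IOProc render delay and the device/stream latency
  // from microseconds to milliseconds, rounding to the nearest millisecond.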
1544   delayMS =
1545       static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
1546   return 0;
1547 }
1548 
1549 bool AudioDeviceMac::Playing() const {
1550   return (_playing);
1551 }
1552 
1553 // ============================================================================
1554 //                                 Private Methods
1555 // ============================================================================
1556 
1557 int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
1558                                          AudioDeviceID scopedDeviceIds[],
1559                                          const uint32_t deviceListLength) {
1560   OSStatus err = noErr;
1561 
1562   AudioObjectPropertyAddress propertyAddress = {
1563       kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
1564       kAudioObjectPropertyElementMaster};
1565   UInt32 size = 0;
1566   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
1567       kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
1568   if (size == 0) {
1569     RTC_LOG(LS_WARNING) << "No devices";
1570     return 0;
1571   }
1572 
1573   UInt32 numberDevices = size / sizeof(AudioDeviceID);
1574   const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
1575   AudioBufferList* bufferList = NULL;
1576   UInt32 numberScopedDevices = 0;
1577 
1578   // First check if there is a default device and list it
1579   UInt32 hardwareProperty = 0;
1580   if (scope == kAudioDevicePropertyScopeOutput) {
1581     hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
1582   } else {
1583     hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
1584   }
1585 
1586   AudioObjectPropertyAddress propertyAddressDefault = {
1587       hardwareProperty, kAudioObjectPropertyScopeGlobal,
1588       kAudioObjectPropertyElementMaster};
1589 
1590   AudioDeviceID usedID;
1591   UInt32 uintSize = sizeof(UInt32);
1592   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1593                                                      &propertyAddressDefault, 0,
1594                                                      NULL, &uintSize, &usedID));
1595   if (usedID != kAudioDeviceUnknown) {
1596     scopedDeviceIds[numberScopedDevices] = usedID;
1597     numberScopedDevices++;
1598   } else {
1599     RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
1600   }
1601 
1602   // Then list the rest of the devices
1603   bool listOK = true;
1604 
1605   WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1606                                                &propertyAddress, 0, NULL, &size,
1607                                                deviceIds.get()));
1608   if (err != noErr) {
1609     listOK = false;
1610   } else {
1611     propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
1612     propertyAddress.mScope = scope;
1613     propertyAddress.mElement = 0;
1614     for (UInt32 i = 0; i < numberDevices; i++) {
1615       // Check for channels in the requested scope.
1616       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
1617           deviceIds[i], &propertyAddress, 0, NULL, &size));
1618       if (err == kAudioHardwareBadDeviceError) {
1619         // This device doesn't actually exist; continue iterating.
1620         continue;
1621       } else if (err != noErr) {
1622         listOK = false;
1623         break;
1624       }
1625 
1626       bufferList = (AudioBufferList*)malloc(size);
1627       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
1628           deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
1629       if (err != noErr) {
1630         listOK = false;
1631         break;
1632       }
1633 
1634       if (bufferList->mNumberBuffers > 0) {
1635         if (numberScopedDevices >= deviceListLength) {
1636           RTC_LOG(LS_ERROR) << "Device list is not long enough";
1637           listOK = false;
1638           break;
1639         }
1640 
1641         scopedDeviceIds[numberScopedDevices] = deviceIds[i];
1642         numberScopedDevices++;
1643       }
1644 
1645       free(bufferList);
1646       bufferList = NULL;
1647     }  // for
1648   }
1649 
1650   if (!listOK) {
1651     if (bufferList) {
1652       free(bufferList);
1653       bufferList = NULL;
1654     }
1655     return -1;
1656   }
1657 
1658   return numberScopedDevices;
1659 }
1660 
1661 int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
1662                                       const uint16_t index,
1663                                       char* name) {
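  // Index 0 refers to the default device and is reported as
  // "default (<device name>)". Other indices below the number of scoped
  // devices are treated as WebRTC device indices; anything larger is assumed
  // to already be a raw CoreAudio AudioDeviceID.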
1664   OSStatus err = noErr;
1665   UInt32 len = kAdmMaxDeviceNameSize;
1666   AudioDeviceID deviceIds[MaxNumberDevices];
1667 
1668   int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
1669   if (numberDevices < 0) {
1670     return -1;
1671   } else if (numberDevices == 0) {
1672     RTC_LOG(LS_ERROR) << "No devices";
1673     return -1;
1674   }
1675 
1676   // If the index is below the number of devices, treat it as a WebRTC device
1677   // index; otherwise assume it is already a CoreAudio device ID.
1678   AudioDeviceID usedID;
1679 
1680   // Check if there is a default device
1681   bool isDefaultDevice = false;
1682   if (index == 0) {
1683     UInt32 hardwareProperty = 0;
1684     if (scope == kAudioDevicePropertyScopeOutput) {
1685       hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
1686     } else {
1687       hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
1688     }
1689     AudioObjectPropertyAddress propertyAddress = {
1690         hardwareProperty, kAudioObjectPropertyScopeGlobal,
1691         kAudioObjectPropertyElementMaster};
1692     UInt32 size = sizeof(UInt32);
1693     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1694         kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
1695     if (usedID == kAudioDeviceUnknown) {
1696       RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
1697     } else {
1698       isDefaultDevice = true;
1699     }
1700   }
1701 
1702   AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
1703                                                 scope, 0};
1704 
1705   if (isDefaultDevice) {
1706     char devName[len];
1707 
1708     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
1709                                                        0, NULL, &len, devName));
1710 
1711     sprintf(name, "default (%s)", devName);
1712   } else {
1713     if (index < numberDevices) {
1714       usedID = deviceIds[index];
1715     } else {
1716       usedID = index;
1717     }
1718 
1719     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
1720                                                        0, NULL, &len, name));
1721   }
1722 
1723   return 0;
1724 }
1725 
1726 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
1727                                    AudioDeviceID& deviceId,
1728                                    const bool isInput) {
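  // Resolves userDeviceIndex to a CoreAudio AudioDeviceID: index 0 selects the
  // system default device for the given direction, any other index selects the
  // corresponding entry from GetNumberDevices(). The device name and
  // manufacturer are then queried, which also validates the resulting ID.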
1729   OSStatus err = noErr;
1730   UInt32 size = 0;
1731   AudioObjectPropertyScope deviceScope;
1732   AudioObjectPropertySelector defaultDeviceSelector;
1733   AudioDeviceID deviceIds[MaxNumberDevices];
1734 
1735   if (isInput) {
1736     deviceScope = kAudioDevicePropertyScopeInput;
1737     defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
1738   } else {
1739     deviceScope = kAudioDevicePropertyScopeOutput;
1740     defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
1741   }
1742 
1743   AudioObjectPropertyAddress propertyAddress = {
1744       defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
1745       kAudioObjectPropertyElementMaster};
1746 
1747   // Get the actual device IDs
1748   int numberDevices =
1749       GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
1750   if (numberDevices < 0) {
1751     return -1;
1752   } else if (numberDevices == 0) {
1753     RTC_LOG(LS_ERROR) << "InitDevice(): No devices";
1754     return -1;
1755   }
1756 
1757   bool isDefaultDevice = false;
1758   deviceId = kAudioDeviceUnknown;
1759   if (userDeviceIndex == 0) {
1760     // Try to use default system device
1761     size = sizeof(AudioDeviceID);
1762     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1763         kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
1764     if (deviceId == kAudioDeviceUnknown) {
1765       RTC_LOG(LS_WARNING) << "No default device exists";
1766     } else {
1767       isDefaultDevice = true;
1768     }
1769   }
1770 
1771   if (!isDefaultDevice) {
1772     deviceId = deviceIds[userDeviceIndex];
1773   }
1774 
1775   // Obtain device name and manufacturer for logging.
1776   // Also use this as a test to ensure a user-set device ID is valid.
1777   char devName[128];
1778   char devManf[128];
1779   memset(devName, 0, sizeof(devName));
1780   memset(devManf, 0, sizeof(devManf));
1781 
1782   propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
1783   propertyAddress.mScope = deviceScope;
1784   propertyAddress.mElement = 0;
1785   size = sizeof(devName);
1786   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
1787                                                      0, NULL, &size, devName));
1788 
1789   propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
1790   size = sizeof(devManf);
1791   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
1792                                                      0, NULL, &size, devManf));
1793 
1794   if (isInput) {
1795     RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName;
1796   } else {
1797     RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName;
1798   }
1799 
1800   return 0;
1801 }
1802 
1803 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
1804   // Our preferred format to work with.
1805   _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
1806   _outDesiredFormat.mChannelsPerFrame = _playChannels;
1807 
1808   if (_ptrAudioBuffer) {
1809     // Update audio buffer with the selected parameters.
1810     _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
1811     _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
1812   }
1813 
1814   _renderDelayOffsetSamples =
1815       _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
1816                                   _outDesiredFormat.mChannelsPerFrame;
1817 
1818   _outDesiredFormat.mBytesPerPacket =
1819       _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1820   // In uncompressed audio, a packet is one frame.
1821   _outDesiredFormat.mFramesPerPacket = 1;
1822   _outDesiredFormat.mBytesPerFrame =
1823       _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1824   _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1825 
1826   _outDesiredFormat.mFormatFlags =
1827       kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
1828 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1829   _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1830 #endif
1831   _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
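  // At this point _outDesiredFormat describes interleaved 16-bit signed linear
  // PCM at N_PLAY_SAMPLES_PER_SEC. The converter created below translates that
  // engine format into the device's native stream format (_outStreamFormat).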
1832 
1833   OSStatus err = noErr;
1834   WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
1835       &_outDesiredFormat, &_outStreamFormat, &_renderConverter));
1836 
1837   // Try to set the device IO buffer size to the desired value of 20 ms.
1838   const uint16_t kPlayBufDelayFixed = 20;
1839   UInt32 bufByteCount = static_cast<UInt32>(
1840       (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
1841       _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
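  // bufByteCount is the device IO buffer size in bytes corresponding to
  // kPlayBufDelayFixed milliseconds of native-rate Float32 audio.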
1842   if (_outStreamFormat.mFramesPerPacket != 0) {
1843     if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
1844       bufByteCount = (static_cast<UInt32>(bufByteCount /
1845                                           _outStreamFormat.mFramesPerPacket) +
1846                       1) *
1847                      _outStreamFormat.mFramesPerPacket;
1848     }
1849   }
1850 
1851   // Ensure the buffer size is within the range provided by the device.
1852   AudioObjectPropertyAddress propertyAddress = {
1853       kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
1854   propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1855   AudioValueRange range;
1856   UInt32 size = sizeof(range);
1857   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1858       _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
1859   if (range.mMinimum > bufByteCount) {
1860     bufByteCount = range.mMinimum;
1861   } else if (range.mMaximum < bufByteCount) {
1862     bufByteCount = range.mMaximum;
1863   }
1864 
1865   propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1866   size = sizeof(bufByteCount);
1867   WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
1868       _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
1869 
1870   // Get render device latency.
1871   propertyAddress.mSelector = kAudioDevicePropertyLatency;
1872   UInt32 latency = 0;
1873   size = sizeof(UInt32);
1874   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1875       _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1876   _renderLatencyUs =
1877       static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
1878 
1879   // Get render stream latency.
1880   propertyAddress.mSelector = kAudioDevicePropertyStreams;
1881   AudioStreamID stream = 0;
1882   size = sizeof(AudioStreamID);
1883   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1884       _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
1885   propertyAddress.mSelector = kAudioStreamPropertyLatency;
1886   size = sizeof(UInt32);
1887   latency = 0;
1888   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1889       _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1890   _renderLatencyUs +=
1891       static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
1892 
1893   RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
1894                       << _renderDelayOffsetSamples
1895                       << ", _renderDelayUs=" << _renderDelayUs
1896                       << ", _renderLatencyUs=" << _renderLatencyUs;
1897   return 0;
1898 }
1899 
1900 OSStatus AudioDeviceMac::objectListenerProc(
1901     AudioObjectID objectId,
1902     UInt32 numberAddresses,
1903     const AudioObjectPropertyAddress addresses[],
1904     void* clientData) {
1905   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
1906   RTC_DCHECK(ptrThis != NULL);
1907 
1908   ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
1909 
1910   // AudioObjectPropertyListenerProc functions are supposed to return 0
1911   return 0;
1912 }
1913 
1914 OSStatus AudioDeviceMac::implObjectListenerProc(
1915     const AudioObjectID objectId,
1916     const UInt32 numberAddresses,
1917     const AudioObjectPropertyAddress addresses[]) {
1918   RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";
1919 
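  // Notifications can arrive batched; dispatch each changed property to its
  // handler. Note that this is typically invoked from a CoreAudio-owned
  // notification thread, not from the worker threads owned by this class.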
1920   for (UInt32 i = 0; i < numberAddresses; i++) {
1921     if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
1922       HandleDeviceChange();
1923     } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
1924       HandleStreamFormatChange(objectId, addresses[i]);
1925     } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
1926       HandleDataSourceChange(objectId, addresses[i]);
1927     } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
1928       HandleProcessorOverload(addresses[i]);
1929     }
1930   }
1931 
1932   return 0;
1933 }
1934 
1935 int32_t AudioDeviceMac::HandleDeviceChange() {
1936   OSStatus err = noErr;
1937 
1938   RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";
1939 
1940   // A device has changed. Check if our registered devices have been removed.
1941   // Ensure the devices have been initialized, meaning the IDs are valid.
1942   if (MicrophoneIsInitialized()) {
1943     AudioObjectPropertyAddress propertyAddress = {
1944         kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
1945     UInt32 deviceIsAlive = 1;
1946     UInt32 size = sizeof(UInt32);
1947     err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
1948                                      &size, &deviceIsAlive);
1949 
1950     if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
1951       RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
1952       AtomicSet32(&_captureDeviceIsAlive, 0);
1953       _mixerManager.CloseMicrophone();
1954     } else if (err != noErr) {
1955       logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
1956                (const char*)&err);
1957       return -1;
1958     }
1959   }
1960 
1961   if (SpeakerIsInitialized()) {
1962     AudioObjectPropertyAddress propertyAddress = {
1963         kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
1964     UInt32 deviceIsAlive = 1;
1965     UInt32 size = sizeof(UInt32);
1966     err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
1967                                      &size, &deviceIsAlive);
1968 
1969     if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
1970       RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)";
1971       AtomicSet32(&_renderDeviceIsAlive, 0);
1972       _mixerManager.CloseSpeaker();
1973     } else if (err != noErr) {
1974       logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
1975                (const char*)&err);
1976       return -1;
1977     }
1978   }
1979 
1980   return 0;
1981 }
1982 
1983 int32_t AudioDeviceMac::HandleStreamFormatChange(
1984     const AudioObjectID objectId,
1985     const AudioObjectPropertyAddress propertyAddress) {
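  // Called when a device's stream format changes (for example, if the user
  // switches the sample rate in a system utility). Re-reads the format,
  // validates it, and recreates the relevant converter so the engine-side
  // formats stay fixed while the device side changes.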
1986   OSStatus err = noErr;
1987 
1988   RTC_LOG(LS_VERBOSE) << "Stream format changed";
1989 
1990   if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
1991     return 0;
1992   }
1993 
1994   // Get the new device format
1995   AudioStreamBasicDescription streamFormat;
1996   UInt32 size = sizeof(streamFormat);
1997   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1998       objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
1999 
2000   if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
2001     logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
2002              (const char*)&streamFormat.mFormatID);
2003     return -1;
2004   }
2005 
2006   if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
2007     RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
2008                       << streamFormat.mChannelsPerFrame << ")";
2009     return -1;
2010   }
2011 
2012   if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) {
2013     RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = "
2014                       << streamFormat.mChannelsPerFrame << ")";
2015     return -1;
2016   }
2017 
2018   RTC_LOG(LS_VERBOSE) << "Stream format:";
2019   RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
2020                       << ", mChannelsPerFrame = "
2021                       << streamFormat.mChannelsPerFrame;
2022   RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
2023                       << ", mFramesPerPacket = "
2024                       << streamFormat.mFramesPerPacket;
2025   RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
2026                       << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
2027   RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
2028   logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
2029 
2030   if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
2031     const int io_block_size_samples = streamFormat.mChannelsPerFrame *
2032                                       streamFormat.mSampleRate / 100 *
2033                                       N_BLOCKS_IO;
2034     if (io_block_size_samples > _captureBufSizeSamples) {
2035       RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
2036                         << ") is larger than ring buffer ("
2037                         << _captureBufSizeSamples << ")";
2038       return -1;
2039     }
2040 
2041     memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
2042 
2043     if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
2044       _inDesiredFormat.mChannelsPerFrame = 2;
2045     } else {
2046       // Disable stereo recording when we only have one channel on the device.
2047       _inDesiredFormat.mChannelsPerFrame = 1;
2048       _recChannels = 1;
2049       RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
2050     }
2051 
2052     // Recreate the converter with the new format
2053     // TODO(xians): make this thread safe
2054     WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
2055 
2056     WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
2057                                               &_captureConverter));
2058   } else {
2059     memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
2060 
2061     // Our preferred format to work with
2062     if (_outStreamFormat.mChannelsPerFrame < 2) {
2063       _playChannels = 1;
2064       RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
2065     }
2066     WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
2067   }
2068   return 0;
2069 }
2070 
2071 int32_t AudioDeviceMac::HandleDataSourceChange(
2072     const AudioObjectID objectId,
2073     const AudioObjectPropertyAddress propertyAddress) {
2074   OSStatus err = noErr;
2075 
2076   if (_macBookPro &&
2077       propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
2078     RTC_LOG(LS_VERBOSE) << "Data source changed";
2079 
2080     _macBookProPanRight = false;
2081     UInt32 dataSource = 0;
2082     UInt32 size = sizeof(UInt32);
2083     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
2084         objectId, &propertyAddress, 0, NULL, &size, &dataSource));
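    // 'ispk' is the four-character data-source code for the built-in internal
    // speakers (as a UInt32, 'ispk' == 0x6973706B); other output sources, such
    // as headphones, report different codes.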
2085     if (dataSource == 'ispk') {
2086       _macBookProPanRight = true;
2087       RTC_LOG(LS_VERBOSE)
2088           << "MacBook Pro using internal speakers; stereo panning right";
2089     } else {
2090       RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
2091     }
2092   }
2093 
2094   return 0;
2095 }

2096 int32_t AudioDeviceMac::HandleProcessorOverload(
2097     const AudioObjectPropertyAddress propertyAddress) {
2098   // TODO(xians): we probably want to notify the user in some way of the
2099   // overload. However, the Windows interpretations of these errors seem to
2100   // be more severe than what ProcessorOverload is thrown for.
2101   //
2102   // We don't log the notification, as it's sent from the HAL's IO thread. We
2103   // don't want to slow it down even further.
2104   if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
2105     // RTC_LOG(LS_WARNING) << "Capture processor overload";
2106     //_callback->ProblemIsReported(
2107     // SndCardStreamObserver::ERecordingProblem);
2108   } else {
2109     // RTC_LOG(LS_WARNING) << "Render processor overload";
2110     //_callback->ProblemIsReported(
2111     // SndCardStreamObserver::EPlaybackProblem);
2112   }
2113 
2114   return 0;
2115 }
2116 
2117 // ============================================================================
2118 //                                  Thread Methods
2119 // ============================================================================
2120 
2121 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
2122                                       const AudioTimeStamp*,
2123                                       const AudioBufferList* inputData,
2124                                       const AudioTimeStamp* inputTime,
2125                                       AudioBufferList* outputData,
2126                                       const AudioTimeStamp* outputTime,
2127                                       void* clientData) {
2128   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
2129   RTC_DCHECK(ptrThis != NULL);
2130 
2131   ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
2132 
2133   // AudioDeviceIOProc functions are supposed to return 0
2134   return 0;
2135 }
2136 
2137 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
2138                                           UInt32* numberDataPackets,
2139                                           AudioBufferList* data,
2140                                           AudioStreamPacketDescription**,
2141                                           void* userData) {
2142   AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
2143   RTC_DCHECK(ptrThis != NULL);
2144 
2145   return ptrThis->implOutConverterProc(numberDataPackets, data);
2146 }
2147 
2148 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
2149                                         const AudioTimeStamp*,
2150                                         const AudioBufferList* inputData,
2151                                         const AudioTimeStamp* inputTime,
2152                                         AudioBufferList*,
2153                                         const AudioTimeStamp*,
2154                                         void* clientData) {
2155   AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
2156   RTC_DCHECK(ptrThis != NULL);
2157 
2158   ptrThis->implInDeviceIOProc(inputData, inputTime);
2159 
2160   // AudioDeviceIOProc functions are supposed to return 0
2161   return 0;
2162 }
2163 
2164 OSStatus AudioDeviceMac::inConverterProc(
2165     AudioConverterRef,
2166     UInt32* numberDataPackets,
2167     AudioBufferList* data,
2168     AudioStreamPacketDescription** /*dataPacketDescription*/,
2169     void* userData) {
2170   AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
2171   RTC_DCHECK(ptrThis != NULL);
2172 
2173   return ptrThis->implInConverterProc(numberDataPackets, data);
2174 }
2175 
2176 OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
2177                                           const AudioTimeStamp* inputTime,
2178                                           AudioBufferList* outputData,
2179                                           const AudioTimeStamp* outputTime) {
2180   OSStatus err = noErr;
2181   UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
2182   UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
2183 
2184   if (!_twoDevices && _recording) {
2185     implInDeviceIOProc(inputData, inputTime);
2186   }
2187 
2188   // Check if we should close down audio device
2189   // Double-checked locking optimization to remove locking overhead
2190   if (_doStop) {
2191     MutexLock lock(&mutex_);
2192     if (_doStop) {
2193       if (_twoDevices || (!_recording && !_playing)) {
2194         // In the case of a shared device, the single driving ioProc
2195         // is stopped here
2196         WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
2197         WEBRTC_CA_LOG_WARN(
2198             AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
2199         if (err == noErr) {
2200           RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
2201         }
2202       }
2203 
2204       _doStop = false;
2205       _stopEvent.Set();
2206       return 0;
2207     }
2208   }
2209 
2210   if (!_playing) {
2211     // This can be the case when a shared device is capturing but not
2212     // rendering. We allow the checks above before returning to avoid a
2213     // timeout when capturing is stopped.
2214     return 0;
2215   }
2216 
2217   RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
2218   UInt32 size =
2219       outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;
2220 
2221   // TODO(xians): signal an error somehow?
2222   err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
2223                                         this, &size, outputData, NULL);
2224   if (err != noErr) {
2225     if (err == 1) {
2226       // This is our own error.
2227       RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
2228       return 1;
2229     } else {
2230       logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
2231                (const char*)&err);
2232       return 1;
2233     }
2234   }
2235 
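  // Estimate the render delay: the time from "now" until this buffer's output
  // timestamp, plus however much audio is already queued in the render ring
  // buffer, both expressed in microseconds.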
2236   PaRingBufferSize bufSizeSamples =
2237       PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
2238 
2239   int32_t renderDelayUs =
2240       static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
2241   renderDelayUs += static_cast<int32_t>(
2242       (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
2243           _outDesiredFormat.mSampleRate +
2244       0.5);
2245 
2246   AtomicSet32(&_renderDelayUs, renderDelayUs);
2247 
2248   return 0;
2249 }
2250 
2251 OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
2252                                               AudioBufferList* data) {
2253   RTC_DCHECK(data->mNumberBuffers == 1);
2254   PaRingBufferSize numSamples =
2255       *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
2256 
2257   data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
2258   // Always give the converter as much as it wants, zero padding as required.
2259   data->mBuffers->mDataByteSize =
2260       *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
2261   data->mBuffers->mData = _renderConvertData;
2262   memset(_renderConvertData, 0, sizeof(_renderConvertData));
2263 
2264   PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
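  // If the ring buffer held fewer than numSamples, the remainder stays zeroed
  // (from the memset above), so an underrun is played out as silence rather
  // than stale data.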
2265 
2266   kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
2267   if (kernErr != KERN_SUCCESS) {
2268     RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
2269     return 1;
2270   }
2271 
2272   return 0;
2273 }
2274 
2275 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
2276                                             const AudioTimeStamp* inputTime) {
2277   OSStatus err = noErr;
2278   UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
2279   UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
2280 
2281   // Check if we should close down audio device
2282   // Double-checked locking optimization to remove locking overhead
2283   if (_doStopRec) {
2284     MutexLock lock(&mutex_);
2285     if (_doStopRec) {
2286       // This will be signalled only when a shared device is not in use.
2287       WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
2288       WEBRTC_CA_LOG_WARN(
2289           AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
2290       if (err == noErr) {
2291         RTC_LOG(LS_VERBOSE) << "Recording device stopped";
2292       }
2293 
2294       _doStopRec = false;
2295       _stopEventRec.Set();
2296       return 0;
2297     }
2298   }
2299 
2300   if (!_recording) {
2301     // Allow the checks above to run so that stopping capture does not time out.
2302     return 0;
2303   }
2304 
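  // Estimate the capture delay: the time elapsed since this buffer's input
  // timestamp, plus the backlog already waiting in the capture ring buffer.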
2305   PaRingBufferSize bufSizeSamples =
2306       PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
2307 
2308   int32_t captureDelayUs =
2309       static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
2310   captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
2311                                              _inStreamFormat.mChannelsPerFrame /
2312                                              _inStreamFormat.mSampleRate +
2313                                          0.5);
2314 
2315   AtomicSet32(&_captureDelayUs, captureDelayUs);
2316 
2317   RTC_DCHECK(inputData->mNumberBuffers == 1);
2318   PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize *
2319                                 _inStreamFormat.mChannelsPerFrame /
2320                                 _inStreamFormat.mBytesPerPacket;
2321   PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
2322                          numSamples);
2323 
2324   kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
2325   if (kernErr != KERN_SUCCESS) {
2326     RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
2327   }
2328 
2329   return err;
2330 }
2331 
2332 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
2333                                              AudioBufferList* data) {
2334   RTC_DCHECK(data->mNumberBuffers == 1);
2335   PaRingBufferSize numSamples =
2336       *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
2337 
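  // Block until the IO proc has produced enough samples for this request,
  // waking periodically so we can notice if the capture device has gone away.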
2338   while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
2339     mach_timespec_t timeout;
2340     timeout.tv_sec = 0;
2341     timeout.tv_nsec = TIMER_PERIOD_MS;
2342 
2343     kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
2344     if (kernErr == KERN_OPERATION_TIMED_OUT) {
2345       int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
2346       if (signal == 0) {
2347         // The capture device is no longer alive; stop the worker thread.
2348         *numberDataPackets = 0;
2349         return 1;
2350       }
2351     } else if (kernErr != KERN_SUCCESS) {
2352       RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
2353     }
2354   }
2355 
2356   // Pass the read pointer directly to the converter to avoid a memcpy.
2357   void* dummyPtr;
2358   PaRingBufferSize dummySize;
2359   PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
2360                                   &data->mBuffers->mData, &numSamples,
2361                                   &dummyPtr, &dummySize);
2362   PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
2363 
2364   data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
2365   *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
2366   data->mBuffers->mDataByteSize =
2367       *numberDataPackets * _inStreamFormat.mBytesPerPacket;
2368 
2369   return 0;
2370 }
2371 
2372 void AudioDeviceMac::RunRender(void* ptrThis) {
2373   AudioDeviceMac* device = static_cast<AudioDeviceMac*>(ptrThis);
2374   while (device->RenderWorkerThread()) {
2375   }
2376 }
2377 
2378 bool AudioDeviceMac::RenderWorkerThread() {
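  // One iteration of the render worker: wait until the render ring buffer has
  // room for one engine playout block, request fresh PCM from the
  // AudioDeviceBuffer, optionally pan it for MacBook Pro internal speakers,
  // and write it into the ring buffer consumed by the device IO proc.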
2379   PaRingBufferSize numSamples =
2380       ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
2381   while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
2382              _renderDelayOffsetSamples <
2383          numSamples) {
2384     mach_timespec_t timeout;
2385     timeout.tv_sec = 0;
2386     timeout.tv_nsec = TIMER_PERIOD_MS;
2387 
2388     kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
2389     if (kernErr == KERN_OPERATION_TIMED_OUT) {
2390       int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
2391       if (signal == 0) {
2392         // The render device is no longer alive; stop the worker thread.
2393         return false;
2394       }
2395     } else if (kernErr != KERN_SUCCESS) {
2396       RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
2397     }
2398   }
2399 
2400   int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
2401 
2402   if (!_ptrAudioBuffer) {
2403     RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
2404     return false;
2405   }
2406 
2407   // Ask for new PCM data to be played out using the AudioDeviceBuffer.
2408   uint32_t nSamples =
2409       _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
2410 
2411   nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
2412   if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
2413     RTC_LOG(LS_ERROR) << "invalid number of output samples (" << nSamples << ")";
2414   }
2415 
2416   uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
2417 
2418   SInt16* pPlayBuffer = (SInt16*)&playBuffer;
2419   if (_macBookProPanRight && (_playChannels == 2)) {
2420     // Mix entirely into the right channel and zero the left channel.
2421     SInt32 sampleInt32 = 0;
2422     for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
2423       sampleInt32 = pPlayBuffer[sampleIdx];
2424       sampleInt32 += pPlayBuffer[sampleIdx + 1];
2425       sampleInt32 /= 2;
2426 
2427       if (sampleInt32 > 32767) {
2428         sampleInt32 = 32767;
2429       } else if (sampleInt32 < -32768) {
2430         sampleInt32 = -32768;
2431       }
2432 
2433       pPlayBuffer[sampleIdx] = 0;
2434       pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
2435     }
2436   }
2437 
2438   PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
2439 
2440   return true;
2441 }
2442 
2443 void AudioDeviceMac::RunCapture(void* ptrThis) {
2444   AudioDeviceMac* device = static_cast<AudioDeviceMac*>(ptrThis);
2445   while (device->CaptureWorkerThread()) {
2446   }
2447 }
2448 
2449 bool AudioDeviceMac::CaptureWorkerThread() {
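  // One iteration of the capture worker: ask the converter for one engine
  // record block; the converter pulls device-format samples from the capture
  // ring buffer via inConverterProc and resamples them into recordBuffer,
  // which is then delivered to the AudioDeviceBuffer together with the
  // current play/record delay estimates.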
2450   OSStatus err = noErr;
2451   UInt32 noRecSamples =
2452       ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
2453   SInt16 recordBuffer[noRecSamples];
2454   UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
2455 
2456   AudioBufferList engineBuffer;
2457   engineBuffer.mNumberBuffers = 1;  // Interleaved channels.
2458   engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
2459   engineBuffer.mBuffers->mDataByteSize =
2460       _inDesiredFormat.mBytesPerPacket * noRecSamples;
2461   engineBuffer.mBuffers->mData = recordBuffer;
2462 
2463   err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
2464                                         this, &size, &engineBuffer, NULL);
2465   if (err != noErr) {
2466     if (err == 1) {
2467       // This is our own error.
2468       return false;
2469     } else {
2470       logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
2471                (const char*)&err);
2472       return false;
2473     }
2474   }
2475 
2476   // TODO(xians): what if the returned size is incorrect?
2477   if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
2478     int32_t msecOnPlaySide;
2479     int32_t msecOnRecordSide;
2480 
2481     int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
2482     int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
2483 
2484     msecOnPlaySide =
2485         static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
2486     msecOnRecordSide =
2487         static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
2488 
2489     if (!_ptrAudioBuffer) {
2490       RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
2491       return false;
2492     }
2493 
2494     // Store the recorded buffer (no action is taken if the number of
2495     // recorded samples does not make up a full buffer).
2496     _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
2497     _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
2498     _ptrAudioBuffer->SetTypingStatus(KeyPressed());
2499 
2500     // Deliver the recorded samples (at the specified sample rate, mic level,
2501     // etc.) to the observer via the registered callback.
2502     _ptrAudioBuffer->DeliverRecordedData();
2503   }
2504 
2505   return true;
2506 }
2507 
2508 bool AudioDeviceMac::KeyPressed() {
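  // Polls the HID key state for every virtual keycode tracked in
  // prev_key_state_ and reports whether any key transitioned from up to down
  // since the last call. The result feeds typing detection via
  // SetTypingStatus() in CaptureWorkerThread().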
2509   bool key_down = false;
2510   // Loop through all Mac virtual key constant values.
2511   for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
2512        ++key_index) {
2513     bool keyState =
2514         CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
2515     // A false -> true change in keymap means a key is pressed.
2516     key_down |= (keyState && !prev_key_state_[key_index]);
2517     // Save current state.
2518     prev_key_state_[key_index] = keyState;
2519   }
2520   return key_down;
2521 }
2522 }  // namespace webrtc
2523