/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.voiceengine;

import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.os.Build;
import android.support.annotation.Nullable;
import java.util.Timer;
import java.util.TimerTask;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;

// WebRtcAudioManager handles tasks that use android.media.AudioManager.
// At construction, storeAudioParameters() is called and it retrieves
// fundamental audio parameters like the native sample rate and number of
// channels. The result is then provided to the caller by
// nativeCacheAudioParameters(). It is also possible to call init() to set up
// the audio environment for best possible "VoIP performance". All settings
// done in init() are reverted by dispose(). This class can also be used
// without calling init() if the user prefers to set up the audio environment
// separately. However, it is recommended to always use
// AudioManager.MODE_IN_COMMUNICATION.
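//
// A minimal usage sketch (assumed application-side code, not part of this class): before the
// audio device is created, the app would typically switch the platform audio manager into
// communication mode and restore the previous mode when the call ends, e.g.:
//
//   AudioManager am = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
//   am.setMode(AudioManager.MODE_IN_COMMUNICATION);
//   // ... run the call ...
//   am.setMode(AudioManager.MODE_NORMAL);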
public class WebRtcAudioManager {
  private static final boolean DEBUG = false;

  private static final String TAG = "WebRtcAudioManager";

  // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
  // been completed. Goal is to always return false on Android O MR1 and higher.
  private static final boolean blacklistDeviceForAAudioUsage = true;

  // Use mono as default for both audio directions.
  private static boolean useStereoOutput;
  private static boolean useStereoInput;

  private static boolean blacklistDeviceForOpenSLESUsage;
  private static boolean blacklistDeviceForOpenSLESUsageIsOverridden;

  // Call this method to override the default list of blacklisted devices
  // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
  // Allows an app to take control over which devices to exclude from using
  // the OpenSL ES audio output path.
  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
    blacklistDeviceForOpenSLESUsageIsOverridden = true;
    blacklistDeviceForOpenSLESUsage = enable;
  }
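
  // Example (hypothetical application code): an app that has verified OpenSL ES output on its
  // own target devices could opt out of the default blacklist before the audio device is
  // created, e.g.:
  //
  //   WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(false);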

  // Call these methods to override the default mono audio modes for the specified direction(s)
  // (input and/or output).
  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized void setStereoOutput(boolean enable) {
    Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
    useStereoOutput = enable;
  }

  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized void setStereoInput(boolean enable) {
    Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
    useStereoInput = enable;
  }

  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized boolean getStereoOutput() {
    return useStereoOutput;
  }

  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized boolean getStereoInput() {
    return useStereoInput;
  }
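
  // Example (hypothetical application code): stereo can be enabled for one direction only,
  // before the native audio manager is constructed, while the other direction keeps the mono
  // default, e.g.:
  //
  //   WebRtcAudioManager.setStereoOutput(true);  // stereo playout
  //   // setStereoInput() is not called, so recording stays mono.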

  // Default audio data format is PCM 16 bit per sample.
  // Guaranteed to be supported by all devices.
  private static final int BITS_PER_SAMPLE = 16;

  private static final int DEFAULT_FRAME_PER_BUFFER = 256;

  // Private utility class that periodically checks and logs the volume level
  // of the audio stream that is currently controlled by the volume control.
  // A timer triggers logs once every 30 seconds and the timer's associated
  // thread is named "WebRtcVolumeLevelLoggerThread".
  private static class VolumeLogger {
    private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
    private static final int TIMER_PERIOD_IN_SECONDS = 30;

    private final AudioManager audioManager;
    private @Nullable Timer timer;

    public VolumeLogger(AudioManager audioManager) {
      this.audioManager = audioManager;
    }

    public void start() {
      timer = new Timer(THREAD_NAME);
      timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
                         audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
          0, TIMER_PERIOD_IN_SECONDS * 1000);
    }

    private class LogVolumeTask extends TimerTask {
      private final int maxRingVolume;
      private final int maxVoiceCallVolume;

      LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
        this.maxRingVolume = maxRingVolume;
        this.maxVoiceCallVolume = maxVoiceCallVolume;
      }

      @Override
      public void run() {
        final int mode = audioManager.getMode();
        if (mode == AudioManager.MODE_RINGTONE) {
          Logging.d(TAG, "STREAM_RING stream volume: "
                  + audioManager.getStreamVolume(AudioManager.STREAM_RING) + " (max="
                  + maxRingVolume + ")");
        } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
          Logging.d(TAG, "VOICE_CALL stream volume: "
                  + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL) + " (max="
                  + maxVoiceCallVolume + ")");
        }
      }
    }

    private void stop() {
      if (timer != null) {
        timer.cancel();
        timer = null;
      }
    }
  }

  private final long nativeAudioManager;
  private final AudioManager audioManager;

  private boolean initialized;
  private int nativeSampleRate;
  private int nativeChannels;

  private boolean hardwareAEC;
  private boolean hardwareAGC;
  private boolean hardwareNS;
  private boolean lowLatencyOutput;
  private boolean lowLatencyInput;
  private boolean proAudio;
  private boolean aAudio;
  private int sampleRate;
  private int outputChannels;
  private int inputChannels;
  private int outputBufferSize;
  private int inputBufferSize;

  private final VolumeLogger volumeLogger;

  WebRtcAudioManager(long nativeAudioManager) {
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
    this.nativeAudioManager = nativeAudioManager;
    audioManager =
        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
    if (DEBUG) {
      WebRtcAudioUtils.logDeviceInfo(TAG);
    }
    volumeLogger = new VolumeLogger(audioManager);
    storeAudioParameters();
    nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
        hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
        inputBufferSize, nativeAudioManager);
    WebRtcAudioUtils.logAudioState(TAG);
  }

  private boolean init() {
    Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
    if (initialized) {
      return true;
    }
    Logging.d(TAG, "audio mode is: "
        + WebRtcAudioUtils.modeToString(audioManager.getMode()));
    initialized = true;
    volumeLogger.start();
    return true;
  }

  private void dispose() {
    Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
    if (!initialized) {
      return;
    }
    volumeLogger.stop();
  }

  private boolean isCommunicationModeEnabled() {
    return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
  }

  private boolean isDeviceBlacklistedForOpenSLESUsage() {
    boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
        ? blacklistDeviceForOpenSLESUsage
        : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
    if (blacklisted) {
      Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
    }
    return blacklisted;
  }

  private void storeAudioParameters() {
    outputChannels = getStereoOutput() ? 2 : 1;
    inputChannels = getStereoInput() ? 2 : 1;
    sampleRate = getNativeOutputSampleRate();
    hardwareAEC = isAcousticEchoCancelerSupported();
    // TODO(henrika): use of hardware AGC is no longer supported. Currently
    // hardcoded to false. To be removed.
    hardwareAGC = false;
    hardwareNS = isNoiseSuppressorSupported();
    lowLatencyOutput = isLowLatencyOutputSupported();
    lowLatencyInput = isLowLatencyInputSupported();
    proAudio = isProAudioSupported();
    aAudio = isAAudioSupported();
    outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
                                        : getMinOutputFrameSize(sampleRate, outputChannels);
    inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
                                      : getMinInputFrameSize(sampleRate, inputChannels);
  }

  // Gets the current earpiece state.
  private boolean hasEarpiece() {
    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
        PackageManager.FEATURE_TELEPHONY);
  }

  // Returns true if low-latency audio output is supported.
  private boolean isLowLatencyOutputSupported() {
    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
  }

  // Returns true if low-latency audio input is supported.
  // TODO(henrika): remove the hardcoded false return value when OpenSL ES
  // input performance has been evaluated and tested more.
  public boolean isLowLatencyInputSupported() {
    // TODO(henrika): investigate if some sort of device list is needed here
    // as well. The NDK doc states that: "As of API level 21, lower latency
    // audio input is supported on select devices. To take advantage of this
    // feature, first confirm that lower latency output is available".
    return Build.VERSION.SDK_INT >= 21 && isLowLatencyOutputSupported();
  }

  // Returns true if the device has professional audio level of functionality
  // and therefore supports the lowest possible round-trip latency.
  private boolean isProAudioSupported() {
    return Build.VERSION.SDK_INT >= 23
        && ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
               PackageManager.FEATURE_AUDIO_PRO);
  }

  // AAudio is supported on Android Oreo MR1 (API 27) and higher.
  // TODO(bugs.webrtc.org/8914): currently disabled by default.
  private boolean isAAudioSupported() {
    if (blacklistDeviceForAAudioUsage) {
      Logging.w(TAG, "AAudio support is currently disabled on all devices!");
    }
    return !blacklistDeviceForAAudioUsage && Build.VERSION.SDK_INT >= 27;
  }

  // Returns the native output sample rate for this device's output stream.
  private int getNativeOutputSampleRate() {
    // Override this if we're running on an old emulator image which only
    // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
    if (WebRtcAudioUtils.runningOnEmulator()) {
      Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
      return 8000;
    }
    // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
    // If so, use that value and return here.
    if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
      Logging.d(TAG, "Default sample rate is overridden to "
              + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
      return WebRtcAudioUtils.getDefaultSampleRateHz();
    }
    // No overrides available. Deliver best possible estimate based on default
    // Android AudioManager APIs.
    final int sampleRateHz = getSampleRateForApiLevel();
    Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
    return sampleRateHz;
  }

  private int getSampleRateForApiLevel() {
    if (Build.VERSION.SDK_INT < 17) {
      return WebRtcAudioUtils.getDefaultSampleRateHz();
    }
    String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
    return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
                                      : Integer.parseInt(sampleRateString);
  }

  // Returns the native output buffer size for low-latency output streams.
  private int getLowLatencyOutputFramesPerBuffer() {
    assertTrue(isLowLatencyOutputSupported());
    if (Build.VERSION.SDK_INT < 17) {
      return DEFAULT_FRAME_PER_BUFFER;
    }
    String framesPerBuffer =
        audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
    return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
  }

  // Returns true if the device supports an audio effect (AEC or NS).
  // Four conditions must be fulfilled for these methods to return true:
  // 1) the platform must support the built-in (HW) effect,
  // 2) explicit use (override) of a WebRTC based version must not be set,
  // 3) the device must not be blacklisted for use of the effect, and
  // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
  private static boolean isAcousticEchoCancelerSupported() {
    return WebRtcAudioEffects.canUseAcousticEchoCanceler();
  }
  private static boolean isNoiseSuppressorSupported() {
    return WebRtcAudioEffects.canUseNoiseSuppressor();
  }

  // Returns the minimum output buffer size for Java based audio (AudioTrack).
  // This size can also be used for OpenSL ES implementations on devices that
  // lack support for low-latency output.
  private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
    final int channelConfig =
        (numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
    return AudioTrack.getMinBufferSize(
               sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
        / bytesPerFrame;
  }
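
  // Worked example (numbers are illustrative, not measured): for 48000 Hz stereo PCM 16-bit,
  // bytesPerFrame = 2 * 2 = 4. If AudioTrack.getMinBufferSize() were to return 15360 bytes,
  // getMinOutputFrameSize() would report 15360 / 4 = 3840 frames, i.e. 80 ms at 48 kHz.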

  // Returns the native input buffer size for low-latency input streams.
  private int getLowLatencyInputFramesPerBuffer() {
    assertTrue(isLowLatencyInputSupported());
    return getLowLatencyOutputFramesPerBuffer();
  }

  // Returns the minimum input buffer size for Java based audio (AudioRecord).
  // This size can also be used for OpenSL ES implementations on devices that
  // lack support for low-latency input.
  private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
    final int channelConfig =
        (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
    return AudioRecord.getMinBufferSize(
               sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
        / bytesPerFrame;
  }

  // Helper method which throws an exception when an assertion has failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
      throw new AssertionError("Expected condition to be true");
    }
  }

  private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
      int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
      boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
      int outputBufferSize, int inputBufferSize, long nativeAudioManager);
}