/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.voiceengine;

import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.os.Build;
import android.os.Process;
import androidx.annotation.Nullable;
import java.lang.Thread;
import java.nio.ByteBuffer;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;

public class WebRtcAudioTrack {
  private static final boolean DEBUG = false;

  private static final String TAG = "WebRtcAudioTrack";

  // Default audio data format is PCM 16 bit per sample.
  // Guaranteed to be supported by all devices.
  private static final int BITS_PER_SAMPLE = 16;

  // Requested size of each buffer of playout audio data fetched from the
  // native WebRTC client.
  private static final int CALLBACK_BUFFER_SIZE_MS = 10;

  // Average number of callbacks per second.
  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;

  // The AudioTrackThread is allowed to wait for a successful call to join(),
  // but the wait times out after this amount of time.
  private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;

  // By default, WebRTC creates audio tracks with a usage attribute
  // corresponding to voice communications, such as telephony or VoIP.
  private static final int DEFAULT_USAGE = getDefaultUsageAttribute();
  private static int usageAttribute = DEFAULT_USAGE;

  // This method overrides the default usage attribute and allows the user
  // to set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
  // NOTE: calling this method will most likely break existing VoIP tuning.
  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
  @SuppressWarnings("NoSynchronizedMethodCheck")
  public static synchronized void setAudioTrackUsageAttribute(int usage) {
    Logging.w(TAG, "Default usage attribute is changed from: "
        + DEFAULT_USAGE + " to " + usage);
    usageAttribute = usage;
  }
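
  // A hedged usage sketch (application code, not part of this class): a client
  // that wants media-style routing instead of the VoIP default could override
  // the attribute before the audio device is initialized. USAGE_MEDIA is just
  // one valid AudioAttributes usage constant.
  //
  //   WebRtcAudioTrack.setAudioTrackUsageAttribute(AudioAttributes.USAGE_MEDIA);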

  private static int getDefaultUsageAttribute() {
    if (Build.VERSION.SDK_INT >= 21) {
      return AudioAttributes.USAGE_VOICE_COMMUNICATION;
    } else {
      // Not used on SDKs lower than 21.
      return 0;
    }
  }

  private final long nativeAudioTrack;
  private final AudioManager audioManager;
  private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();

  private ByteBuffer byteBuffer;

  private @Nullable AudioTrack audioTrack;
  private @Nullable AudioTrackThread audioThread;

  // Samples to be played are replaced by zeros if |speakerMute| is set to true.
  // Can be used to ensure that the speaker is fully muted.
  private static volatile boolean speakerMute;
  private byte[] emptyBytes;

  // Audio playout/track error handler functions.
  public enum AudioTrackStartErrorCode {
    AUDIO_TRACK_START_EXCEPTION,
    AUDIO_TRACK_START_STATE_MISMATCH,
  }

  @Deprecated
  public static interface WebRtcAudioTrackErrorCallback {
    void onWebRtcAudioTrackInitError(String errorMessage);
    void onWebRtcAudioTrackStartError(String errorMessage);
    void onWebRtcAudioTrackError(String errorMessage);
  }

  // TODO(henrika): upgrade all clients to use this new interface instead.
  public static interface ErrorCallback {
    void onWebRtcAudioTrackInitError(String errorMessage);
    void onWebRtcAudioTrackStartError(AudioTrackStartErrorCode errorCode, String errorMessage);
    void onWebRtcAudioTrackError(String errorMessage);
  }

  private static @Nullable WebRtcAudioTrackErrorCallback errorCallbackOld;
  private static @Nullable ErrorCallback errorCallback;

  @Deprecated
  public static void setErrorCallback(WebRtcAudioTrackErrorCallback errorCallback) {
    Logging.d(TAG, "Set error callback (deprecated)");
    WebRtcAudioTrack.errorCallbackOld = errorCallback;
  }

  public static void setErrorCallback(ErrorCallback errorCallback) {
    Logging.d(TAG, "Set extended error callback");
    WebRtcAudioTrack.errorCallback = errorCallback;
  }
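
  // A minimal wiring sketch for the extended callback (illustrative only; the
  // "App" tag and android.util.Log calls stand in for real error handling):
  //
  //   WebRtcAudioTrack.setErrorCallback(new WebRtcAudioTrack.ErrorCallback() {
  //     @Override
  //     public void onWebRtcAudioTrackInitError(String errorMessage) {
  //       Log.e("App", "Init error: " + errorMessage);
  //     }
  //     @Override
  //     public void onWebRtcAudioTrackStartError(
  //         WebRtcAudioTrack.AudioTrackStartErrorCode errorCode, String errorMessage) {
  //       Log.e("App", "Start error: " + errorCode + ", " + errorMessage);
  //     }
  //     @Override
  //     public void onWebRtcAudioTrackError(String errorMessage) {
  //       Log.e("App", "Runtime error: " + errorMessage);
  //     }
  //   });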

  /**
   * Audio thread which keeps calling AudioTrack.write() to stream audio.
   * Data is periodically acquired from the native WebRTC layer using the
   * nativeGetPlayoutData callback function.
   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
   */
  private class AudioTrackThread extends Thread {
    private volatile boolean keepAlive = true;

    public AudioTrackThread(String name) {
      super(name);
    }

    @Override
    public void run() {
      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
      Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
      assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);

      // Fixed size in bytes of each 10ms block of audio data that we ask for
      // using callbacks to the native WebRTC client.
      final int sizeInBytes = byteBuffer.capacity();

      while (keepAlive) {
        // Get 10ms of PCM data from the native WebRTC client. Audio data is
        // written into the common ByteBuffer using the address that was
        // cached at construction.
        nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
        // Write data until all data has been written to the audio sink.
        // Upon return, the buffer position will have been advanced to reflect
        // the amount of data that was successfully written to the AudioTrack.
        assertTrue(sizeInBytes <= byteBuffer.remaining());
        if (speakerMute) {
          byteBuffer.clear();
          byteBuffer.put(emptyBytes);
          byteBuffer.position(0);
        }
        int bytesWritten = writeBytes(audioTrack, byteBuffer, sizeInBytes);
        if (bytesWritten != sizeInBytes) {
          Logging.e(TAG, "AudioTrack.write wrote an invalid number of bytes: " + bytesWritten);
          // If a write() returns a negative value, an error has occurred.
          // Stop playing and report an error in this case.
          if (bytesWritten < 0) {
            keepAlive = false;
            reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
          }
        }
        // The byte buffer must be rewound since byteBuffer.position() is
        // increased at each call to AudioTrack.write(). If we don't do this,
        // the next call to AudioTrack.write() will fail.
        byteBuffer.rewind();

        // TODO(henrika): it is possible to create a delay estimate here by
        // counting number of written frames and subtracting the result from
        // audioTrack.getPlaybackHeadPosition().
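        //
        // A rough sketch of that estimate (hypothetical code; `framesWritten`
        // would be a long field reset in startPlayout(), and one frame is one
        // 16-bit sample per channel):
        //
        //   framesWritten += bytesWritten / (audioTrack.getChannelCount() * 2);
        //   long framesBuffered = framesWritten - audioTrack.getPlaybackHeadPosition();
        //   long delayMs = framesBuffered * 1000 / audioTrack.getSampleRate();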
      }

      // Stops playing the audio data. Since the instance was created in
      // MODE_STREAM mode, audio will stop playing after the last buffer that
      // was written has been played.
      if (audioTrack != null) {
        Logging.d(TAG, "Calling AudioTrack.stop...");
        try {
          audioTrack.stop();
          Logging.d(TAG, "AudioTrack.stop is done.");
        } catch (IllegalStateException e) {
          Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
        }
      }
    }

    private int writeBytes(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
      if (Build.VERSION.SDK_INT >= 21) {
        return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
      } else {
        return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
      }
    }

    // Stops the inner thread loop, which results in calling AudioTrack.stop().
    // Does not block the calling thread.
    public void stopThread() {
      Logging.d(TAG, "stopThread");
      keepAlive = false;
    }
  }

  WebRtcAudioTrack(long nativeAudioTrack) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
    this.nativeAudioTrack = nativeAudioTrack;
    audioManager =
        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
    if (DEBUG) {
      WebRtcAudioUtils.logDeviceInfo(TAG);
    }
  }

  private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG,
        "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels
            + ", bufferSizeFactor=" + bufferSizeFactor + ")");
    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
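    // Worked example (assuming sampleRate = 48000 and channels = 1):
    // bytesPerFrame = 1 * 2 = 2 bytes, so the capacity becomes
    // 2 * (48000 / 100) = 960 bytes, i.e. one 10 ms mono buffer.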
    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
    emptyBytes = new byte[byteBuffer.capacity()];
    // Rather than passing the ByteBuffer with every callback (requiring
    // the potentially expensive GetDirectBufferAddress) we simply have the
    // native class cache the address to the memory once.
    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);

    // Get the minimum buffer size required to create an AudioTrack object
    // in MODE_STREAM mode.
    // Note that this size doesn't guarantee smooth playback under load.
    final int channelConfig = channelCountToConfiguration(channels);
    final int minBufferSizeInBytes = (int) (AudioTrack.getMinBufferSize(sampleRate, channelConfig,
                                                AudioFormat.ENCODING_PCM_16BIT)
        * bufferSizeFactor);
    Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
    // For the streaming mode, data must be written to the audio sink in
    // chunks of size (given by byteBuffer.capacity()) less than or equal
    // to the total buffer size |minBufferSizeInBytes|. But we have seen
    // reports of "getMinBufferSize(): error querying hardware". Hence, it
    // can happen that |minBufferSizeInBytes| contains an invalid value.
    if (minBufferSizeInBytes < byteBuffer.capacity()) {
      reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
      return -1;
    }

    // Ensure that the previous audio session was stopped correctly before
    // trying to create a new AudioTrack.
    if (audioTrack != null) {
      reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
      return -1;
    }
    try {
      // Create an AudioTrack object and initialize its associated audio buffer.
      // The size of this buffer determines how long an AudioTrack can play
      // before running out of data.
      if (Build.VERSION.SDK_INT >= 21) {
        // If we are on API level 21 or higher, it is possible to use a special AudioTrack
        // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
        // supersede the notion of stream types for defining the behavior of audio playback,
        // and to allow certain platforms or routing policies to use this information for more
        // refined volume or routing decisions.
        audioTrack = createAudioTrackOnLollipopOrHigher(
            sampleRate, channelConfig, minBufferSizeInBytes);
      } else {
        // Use default constructor for API levels below 21.
        audioTrack =
            createAudioTrackOnLowerThanLollipop(sampleRate, channelConfig, minBufferSizeInBytes);
      }
    } catch (IllegalArgumentException e) {
      reportWebRtcAudioTrackInitError(e.getMessage());
      releaseAudioResources();
      return -1;
    }

    // It can happen that an AudioTrack is created but it was not successfully
    // initialized upon creation. Seems to be the case e.g. when the maximum
    // number of globally available audio tracks is exceeded.
    if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
      reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
      releaseAudioResources();
      return -1;
    }
    logMainParameters();
    logMainParametersExtended();
    return minBufferSizeInBytes;
  }

  private boolean startPlayout() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "startPlayout");
    assertTrue(audioTrack != null);
    assertTrue(audioThread == null);

    // Starts playing an audio track.
    try {
      audioTrack.play();
    } catch (IllegalStateException e) {
      reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
          "AudioTrack.play failed: " + e.getMessage());
      releaseAudioResources();
      return false;
    }
    if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
      reportWebRtcAudioTrackStartError(
          AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
          "AudioTrack.play failed - incorrect state: "
          + audioTrack.getPlayState());
      releaseAudioResources();
      return false;
    }

    // Create and start a new high-priority thread which calls AudioTrack.write()
    // and where we also call the native nativeGetPlayoutData() callback to
    // request decoded audio from WebRTC.
    audioThread = new AudioTrackThread("AudioTrackJavaThread");
    audioThread.start();
    return true;
  }

  private boolean stopPlayout() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "stopPlayout");
    assertTrue(audioThread != null);
    logUnderrunCount();
    audioThread.stopThread();

    Logging.d(TAG, "Stopping the AudioTrackThread...");
    audioThread.interrupt();
    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioTrackThread timed out.");
      WebRtcAudioUtils.logAudioState(TAG);
    }
    Logging.d(TAG, "AudioTrackThread has now been stopped.");
    audioThread = null;
    releaseAudioResources();
    return true;
  }

  // Get max possible volume index for a phone call audio stream.
  private int getStreamMaxVolume() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "getStreamMaxVolume");
    assertTrue(audioManager != null);
    return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
  }

  // Set current volume level for a phone call audio stream.
  private boolean setStreamVolume(int volume) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "setStreamVolume(" + volume + ")");
    assertTrue(audioManager != null);
    if (isVolumeFixed()) {
      Logging.e(TAG, "The device implements a fixed volume policy.");
      return false;
    }
    audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
    return true;
  }

  private boolean isVolumeFixed() {
    if (Build.VERSION.SDK_INT < 21)
      return false;
    return audioManager.isVolumeFixed();
  }

  /** Get current volume level for a phone call audio stream. */
  private int getStreamVolume() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "getStreamVolume");
    assertTrue(audioManager != null);
    return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
  }

  private void logMainParameters() {
    Logging.d(TAG, "AudioTrack: "
            + "session ID: " + audioTrack.getAudioSessionId() + ", "
            + "channels: " + audioTrack.getChannelCount() + ", "
            + "sample rate: " + audioTrack.getSampleRate() + ", "
            // Gain (>=1.0) expressed as linear multiplier on sample values.
            + "max gain: " + AudioTrack.getMaxVolume());
  }

  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
  // It allows certain platforms or routing policies to use this information for more
  // refined volume or routing decisions.
  @TargetApi(21)
  private static AudioTrack createAudioTrackOnLollipopOrHigher(
      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
    Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
    // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
    // performance when Android O is supported. Add some logging in the meantime.
    final int nativeOutputSampleRate =
        AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
    Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
    if (sampleRateInHz != nativeOutputSampleRate) {
      Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
    }
    if (usageAttribute != DEFAULT_USAGE) {
      Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
    }
    // Create an audio track where the audio usage is for VoIP and the content type is speech.
    return new AudioTrack(
        new AudioAttributes.Builder()
            .setUsage(usageAttribute)
            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
            .build(),
        new AudioFormat.Builder()
            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
            .setSampleRate(sampleRateInHz)
            .setChannelMask(channelConfig)
            .build(),
        bufferSizeInBytes,
        AudioTrack.MODE_STREAM,
        AudioManager.AUDIO_SESSION_ID_GENERATE);
  }

  @SuppressWarnings("deprecation") // Deprecated in API level 25.
  private static AudioTrack createAudioTrackOnLowerThanLollipop(
      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
    return new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRateInHz, channelConfig,
        AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
  }

  private void logBufferSizeInFrames() {
    if (Build.VERSION.SDK_INT >= 23) {
      Logging.d(TAG, "AudioTrack: "
              // The effective size of the AudioTrack buffer that the app writes to.
              + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
    }
  }

  private int getBufferSizeInFrames() {
    if (Build.VERSION.SDK_INT >= 23) {
      return audioTrack.getBufferSizeInFrames();
    }
    return -1;
  }

  private void logBufferCapacityInFrames() {
    if (Build.VERSION.SDK_INT >= 24) {
      Logging.d(TAG,
          "AudioTrack: "
              // Maximum size of the AudioTrack buffer in frames.
              + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
    }
  }

  private void logMainParametersExtended() {
    logBufferSizeInFrames();
    logBufferCapacityInFrames();
  }

  // Prints the number of underrun occurrences in the application-level write
  // buffer since the AudioTrack was created. An underrun occurs if the app does
  // not write audio data quickly enough, causing the buffer to underflow and a
  // potential audio glitch.
  // TODO(henrika): keep track of this value in the field and possibly add new
  // UMA stat if needed.
  private void logUnderrunCount() {
    if (Build.VERSION.SDK_INT >= 24) {
      Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
    }
  }

  // Helper method which throws an exception when an assertion has failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
      throw new AssertionError("Expected condition to be true");
    }
  }

  private int channelCountToConfiguration(int channels) {
    return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
  }

  private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioTrack);

  private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);

  // Sets all samples to be played out to zero if |mute| is true, i.e.,
  // ensures that the speaker is muted.
  public static void setSpeakerMute(boolean mute) {
    Logging.w(TAG, "setSpeakerMute(" + mute + ")");
    speakerMute = mute;
  }
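
  // An illustrative sketch of a "mute remote audio" toggle built on this hook
  // (application-level code, not part of this class):
  //
  //   WebRtcAudioTrack.setSpeakerMute(true);   // play out silence
  //   WebRtcAudioTrack.setSpeakerMute(false);  // restore normal playout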

  // Releases the underlying AudioTrack resources.
  private void releaseAudioResources() {
    Logging.d(TAG, "releaseAudioResources");
    if (audioTrack != null) {
      audioTrack.release();
      audioTrack = null;
    }
  }

  private void reportWebRtcAudioTrackInitError(String errorMessage) {
    Logging.e(TAG, "Init playout error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG);
    if (errorCallbackOld != null) {
      errorCallbackOld.onWebRtcAudioTrackInitError(errorMessage);
    }
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackInitError(errorMessage);
    }
  }

  private void reportWebRtcAudioTrackStartError(
      AudioTrackStartErrorCode errorCode, String errorMessage) {
    Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG);
    if (errorCallbackOld != null) {
      errorCallbackOld.onWebRtcAudioTrackStartError(errorMessage);
    }
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
    }
  }

  private void reportWebRtcAudioTrackError(String errorMessage) {
    Logging.e(TAG, "Run-time playback error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG);
    if (errorCallbackOld != null) {
      errorCallbackOld.onWebRtcAudioTrackError(errorMessage);
    }
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackError(errorMessage);
    }
  }
}