1 /* This Source Code Form is subject to the terms of the Mozilla Public
2  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
3  * You can obtain one at http://mozilla.org/MPL/2.0/. */
4 
5 #ifndef MEDIAENGINEWEBRTC_H_
6 #define MEDIAENGINEWEBRTC_H_
7 
8 #include "prcvar.h"
9 #include "prthread.h"
10 #include "prprf.h"
11 #include "nsIThread.h"
12 #include "nsIRunnable.h"
13 
14 #include "mozilla/dom/File.h"
15 #include "mozilla/Mutex.h"
16 #include "mozilla/StaticMutex.h"
17 #include "mozilla/Monitor.h"
18 #include "mozilla/UniquePtr.h"
19 #include "nsAutoPtr.h"
20 #include "nsCOMPtr.h"
21 #include "nsThreadUtils.h"
22 #include "DOMMediaStream.h"
23 #include "nsDirectoryServiceDefs.h"
24 #include "nsComponentManagerUtils.h"
25 #include "nsRefPtrHashtable.h"
26 
27 #include "VideoUtils.h"
28 #include "MediaEngineCameraVideoSource.h"
29 #include "VideoSegment.h"
30 #include "AudioSegment.h"
31 #include "StreamTracks.h"
32 #include "MediaStreamGraph.h"
33 #include "cubeb/cubeb.h"
34 #include "CubebUtils.h"
35 #include "AudioPacketizer.h"
36 
37 #include "MediaEngineWrapper.h"
38 #include "mozilla/dom/MediaStreamTrackBinding.h"
39 // WebRTC library includes follow
40 #include "webrtc/common.h"
41 // Audio Engine
42 #include "webrtc/voice_engine/include/voe_base.h"
43 #include "webrtc/voice_engine/include/voe_codec.h"
44 #include "webrtc/voice_engine/include/voe_hardware.h"
45 #include "webrtc/voice_engine/include/voe_network.h"
46 #include "webrtc/voice_engine/include/voe_audio_processing.h"
47 #include "webrtc/voice_engine/include/voe_volume_control.h"
48 #include "webrtc/voice_engine/include/voe_external_media.h"
49 #include "webrtc/voice_engine/include/voe_audio_processing.h"
50 #include "webrtc/modules/audio_processing/include/audio_processing.h"
51 
52 // Video Engine
53 // conflicts with #include of scoped_ptr.h
54 #undef FF
55 #include "webrtc/video_engine/include/vie_base.h"
56 #include "webrtc/video_engine/include/vie_codec.h"
57 #include "webrtc/video_engine/include/vie_render.h"
58 #include "webrtc/video_engine/include/vie_capture.h"
59 #include "CamerasChild.h"
60 
61 #include "NullTransport.h"
62 #include "AudioOutputObserver.h"
63 
64 namespace mozilla {
65 
66 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
67 {
68 public:
69   NS_DECL_THREADSAFE_ISUPPORTS
70 
MediaEngineWebRTCAudioCaptureSource(const char * aUuid)71   explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
72     : MediaEngineAudioSource(kReleased)
73   {
74   }
75   void GetName(nsAString& aName) const override;
76   void GetUUID(nsACString& aUUID) const override;
Allocate(const dom::MediaTrackConstraints & aConstraints,const MediaEnginePrefs & aPrefs,const nsString & aDeviceId,const nsACString & aOrigin,AllocationHandle ** aOutHandle,const char ** aOutBadConstraint)77   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
78                     const MediaEnginePrefs& aPrefs,
79                     const nsString& aDeviceId,
80                     const nsACString& aOrigin,
81                     AllocationHandle** aOutHandle,
82                     const char** aOutBadConstraint) override
83   {
84     // Nothing to do here, everything is managed in MediaManager.cpp
85     *aOutHandle = nullptr;
86     return NS_OK;
87   }
Deallocate(AllocationHandle * aHandle)88   nsresult Deallocate(AllocationHandle* aHandle) override
89   {
90     // Nothing to do here, everything is managed in MediaManager.cpp
91     MOZ_ASSERT(!aHandle);
92     return NS_OK;
93   }
94   nsresult Start(SourceMediaStream* aMediaStream,
95                  TrackID aId,
96                  const PrincipalHandle& aPrincipalHandle) override;
97   nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
98   nsresult Restart(AllocationHandle* aHandle,
99                    const dom::MediaTrackConstraints& aConstraints,
100                    const MediaEnginePrefs &aPrefs,
101                    const nsString& aDeviceId,
102                    const char** aOutBadConstraint) override;
SetDirectListeners(bool aDirect)103   void SetDirectListeners(bool aDirect) override
104   {}
NotifyOutputData(MediaStreamGraph * aGraph,AudioDataValue * aBuffer,size_t aFrames,TrackRate aRate,uint32_t aChannels)105   void NotifyOutputData(MediaStreamGraph* aGraph,
106                         AudioDataValue* aBuffer, size_t aFrames,
107                         TrackRate aRate, uint32_t aChannels) override
108   {}
DeviceChanged()109   void DeviceChanged() override
110   {}
NotifyInputData(MediaStreamGraph * aGraph,const AudioDataValue * aBuffer,size_t aFrames,TrackRate aRate,uint32_t aChannels)111   void NotifyInputData(MediaStreamGraph* aGraph,
112                        const AudioDataValue* aBuffer, size_t aFrames,
113                        TrackRate aRate, uint32_t aChannels) override
114   {}
NotifyPull(MediaStreamGraph * aGraph,SourceMediaStream * aSource,TrackID aID,StreamTime aDesiredTime,const PrincipalHandle & aPrincipalHandle)115   void NotifyPull(MediaStreamGraph* aGraph,
116                   SourceMediaStream* aSource,
117                   TrackID aID,
118                   StreamTime aDesiredTime,
119                   const PrincipalHandle& aPrincipalHandle) override
120   {}
GetMediaSource()121   dom::MediaSourceEnum GetMediaSource() const override
122   {
123     return dom::MediaSourceEnum::AudioCapture;
124   }
IsFake()125   bool IsFake() override
126   {
127     return false;
128   }
TakePhoto(MediaEnginePhotoCallback * aCallback)129   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
130   {
131     return NS_ERROR_NOT_IMPLEMENTED;
132   }
133   uint32_t GetBestFitnessDistance(
134     const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
135     const nsString& aDeviceId) const override;
136 
137 protected:
~MediaEngineWebRTCAudioCaptureSource()138   virtual ~MediaEngineWebRTCAudioCaptureSource() {}
139   nsCString mUUID;
140 };
141 
// Small subset of VoEHardware
// Abstract interface over an audio-input backend; implemented below by
// AudioInputCubeb (cubeb-based) and AudioInputWebRTC (VoEHardware-based).
class AudioInput
{
public:
  explicit AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
  // Threadsafe because it's referenced from a MicrophoneSource, which can
  // have references to it on other threads.
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)

  // Returns 0 on success (int-style error codes, matching VoEHardware);
  // aDevices receives the number of recording devices.
  virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
  // Fills the caller-provided 128-byte UTF-8 name and GUID buffers for the
  // device at aIndex.  Returns 0 on success.
  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
                                     char aStrGuidUTF8[128]) = 0;
  // Sets aIsAvailable to whether the current recording device is usable.
  virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
  // Begin/end feeding input audio for aStream; aListener receives the data
  // callbacks.
  virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
  virtual void StopRecording(SourceMediaStream *aStream) = 0;
  // Selects the device used for subsequent recording.  Returns 0 on success.
  virtual int SetRecordingDevice(int aIndex) = 0;

protected:
  // Protected destructor, to discourage deletion outside of Release():
  virtual ~AudioInput() {}

  // Raw pointer: the VoiceEngine is owned elsewhere (presumably by
  // MediaEngineWebRTC) — TODO confirm lifetime guarantees.
  webrtc::VoiceEngine* mVoiceEngine;
};
165 
166 class AudioInputCubeb final : public AudioInput
167 {
168 public:
169   explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine, int aIndex = 0) :
AudioInput(aVoiceEngine)170     AudioInput(aVoiceEngine), mSelectedDevice(aIndex), mInUseCount(0)
171   {
172     if (!mDeviceIndexes) {
173       mDeviceIndexes = new nsTArray<int>;
174       mDeviceNames = new nsTArray<nsCString>;
175       mDefaultDevice = -1;
176     }
177   }
178 
CleanupGlobalData()179   static void CleanupGlobalData()
180   {
181     if (mDevices) {
182       // This doesn't require anything more than support for free()
183       cubeb_device_collection_destroy(mDevices);
184       mDevices = nullptr;
185     }
186     delete mDeviceIndexes;
187     mDeviceIndexes = nullptr;
188     delete mDeviceNames;
189     mDeviceNames = nullptr;
190   }
191 
GetNumOfRecordingDevices(int & aDevices)192   int GetNumOfRecordingDevices(int& aDevices)
193   {
194     UpdateDeviceList();
195     aDevices = mDeviceIndexes->Length();
196     return 0;
197   }
198 
DeviceIndex(int aIndex)199   static int32_t DeviceIndex(int aIndex)
200   {
201     // -1 = system default if any
202     if (aIndex == -1) {
203       if (mDefaultDevice == -1) {
204         aIndex = 0;
205       } else {
206         aIndex = mDefaultDevice;
207       }
208     }
209     if (aIndex < 0 || aIndex >= (int) mDeviceIndexes->Length()) {
210       return -1;
211     }
212     // Note: if the device is gone, this will be -1
213     return (*mDeviceIndexes)[aIndex]; // translate to mDevices index
214   }
215 
Mutex()216   static StaticMutex& Mutex()
217   {
218     return sMutex;
219   }
220 
GetDeviceID(int aDeviceIndex,CubebUtils::AudioDeviceID & aID)221   static bool GetDeviceID(int aDeviceIndex, CubebUtils::AudioDeviceID &aID)
222   {
223     // Assert sMutex is held
224     sMutex.AssertCurrentThreadOwns();
225     int dev_index = DeviceIndex(aDeviceIndex);
226     if (dev_index != -1) {
227       aID = mDevices->device[dev_index]->devid;
228       return true;
229     }
230     return false;
231   }
232 
GetRecordingDeviceName(int aIndex,char aStrNameUTF8[128],char aStrGuidUTF8[128])233   int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
234                              char aStrGuidUTF8[128])
235   {
236     int32_t devindex = DeviceIndex(aIndex);
237     if (!mDevices || devindex < 0) {
238       return 1;
239     }
240     PR_snprintf(aStrNameUTF8, 128, "%s%s", aIndex == -1 ? "default: " : "",
241                 mDevices->device[devindex]->friendly_name);
242     aStrGuidUTF8[0] = '\0';
243     return 0;
244   }
245 
GetRecordingDeviceStatus(bool & aIsAvailable)246   int GetRecordingDeviceStatus(bool& aIsAvailable)
247   {
248     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
249     // so unless it was removed, say it's available
250     aIsAvailable = true;
251     return 0;
252   }
253 
StartRecording(SourceMediaStream * aStream,AudioDataListener * aListener)254   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener)
255   {
256     MOZ_ASSERT(mDevices);
257 
258     if (mInUseCount == 0) {
259       ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
260       ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
261       if (ptrVoERender) {
262         ptrVoERender->SetExternalRecordingStatus(true);
263       }
264       mAnyInUse = true;
265     }
266     mInUseCount++;
267     // Always tell the stream we're using it for input
268     aStream->OpenAudioInput(mSelectedDevice, aListener);
269   }
270 
StopRecording(SourceMediaStream * aStream)271   void StopRecording(SourceMediaStream *aStream)
272   {
273     aStream->CloseAudioInput();
274     if (--mInUseCount == 0) {
275       mAnyInUse = false;
276     }
277   }
278 
SetRecordingDevice(int aIndex)279   int SetRecordingDevice(int aIndex)
280   {
281     mSelectedDevice = aIndex;
282     return 0;
283   }
284 
285 protected:
~AudioInputCubeb()286   ~AudioInputCubeb() {
287     MOZ_RELEASE_ASSERT(mInUseCount == 0);
288   }
289 
290 private:
291   // It would be better to watch for device-change notifications
292   void UpdateDeviceList();
293 
294   // We have an array, which consists of indexes to the current mDevices
295   // list.  This is updated on mDevices updates.  Many devices in mDevices
296   // won't be included in the array (wrong type, etc), or if a device is
297   // removed it will map to -1 (and opens of this device will need to check
298   // for this - and be careful of threading access.  The mappings need to
299   // updated on each re-enumeration.
300   int mSelectedDevice;
301   uint32_t mInUseCount;
302 
303   // pointers to avoid static constructors
304   static nsTArray<int>* mDeviceIndexes;
305   static int mDefaultDevice; // -1 == not set
306   static nsTArray<nsCString>* mDeviceNames;
307   static cubeb_device_collection *mDevices;
308   static bool mAnyInUse;
309   static StaticMutex sMutex;
310 };
311 
312 class AudioInputWebRTC final : public AudioInput
313 {
314 public:
AudioInputWebRTC(webrtc::VoiceEngine * aVoiceEngine)315   explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
316 
GetNumOfRecordingDevices(int & aDevices)317   int GetNumOfRecordingDevices(int& aDevices)
318   {
319     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
320     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
321     if (!ptrVoEHw)  {
322       return 1;
323     }
324     return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
325   }
326 
GetRecordingDeviceName(int aIndex,char aStrNameUTF8[128],char aStrGuidUTF8[128])327   int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
328                              char aStrGuidUTF8[128])
329   {
330     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
331     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
332     if (!ptrVoEHw)  {
333       return 1;
334     }
335     return ptrVoEHw->GetRecordingDeviceName(aIndex, aStrNameUTF8,
336                                             aStrGuidUTF8);
337   }
338 
GetRecordingDeviceStatus(bool & aIsAvailable)339   int GetRecordingDeviceStatus(bool& aIsAvailable)
340   {
341     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
342     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
343     if (!ptrVoEHw)  {
344       return 1;
345     }
346     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
347     return 0;
348   }
349 
StartRecording(SourceMediaStream * aStream,AudioDataListener * aListener)350   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
StopRecording(SourceMediaStream * aStream)351   void StopRecording(SourceMediaStream *aStream) {}
352 
SetRecordingDevice(int aIndex)353   int SetRecordingDevice(int aIndex)
354   {
355     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
356     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
357     if (!ptrVoEHw)  {
358       return 1;
359     }
360     return ptrVoEHw->SetRecordingDevice(aIndex);
361   }
362 
363 protected:
364   // Protected destructor, to discourage deletion outside of Release():
~AudioInputWebRTC()365   ~AudioInputWebRTC() {}
366 };
367 
368 class WebRTCAudioDataListener : public AudioDataListener
369 {
370 protected:
371   // Protected destructor, to discourage deletion outside of Release():
~WebRTCAudioDataListener()372   virtual ~WebRTCAudioDataListener() {}
373 
374 public:
WebRTCAudioDataListener(MediaEngineAudioSource * aAudioSource)375   explicit WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource)
376     : mMutex("WebRTCAudioDataListener")
377     , mAudioSource(aAudioSource)
378   {}
379 
380   // AudioDataListenerInterface methods
NotifyOutputData(MediaStreamGraph * aGraph,AudioDataValue * aBuffer,size_t aFrames,TrackRate aRate,uint32_t aChannels)381   virtual void NotifyOutputData(MediaStreamGraph* aGraph,
382                                 AudioDataValue* aBuffer, size_t aFrames,
383                                 TrackRate aRate, uint32_t aChannels) override
384   {
385     MutexAutoLock lock(mMutex);
386     if (mAudioSource) {
387       mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels);
388     }
389   }
NotifyInputData(MediaStreamGraph * aGraph,const AudioDataValue * aBuffer,size_t aFrames,TrackRate aRate,uint32_t aChannels)390   virtual void NotifyInputData(MediaStreamGraph* aGraph,
391                                const AudioDataValue* aBuffer, size_t aFrames,
392                                TrackRate aRate, uint32_t aChannels) override
393   {
394     MutexAutoLock lock(mMutex);
395     if (mAudioSource) {
396       mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels);
397     }
398   }
DeviceChanged()399   virtual void DeviceChanged() override
400   {
401     MutexAutoLock lock(mMutex);
402     if (mAudioSource) {
403       mAudioSource->DeviceChanged();
404     }
405   }
406 
Shutdown()407   void Shutdown()
408   {
409     MutexAutoLock lock(mMutex);
410     mAudioSource = nullptr;
411   }
412 
413 private:
414   Mutex mMutex;
415   RefPtr<MediaEngineAudioSource> mAudioSource;
416 };
417 
// Microphone capture source.  Audio flows in from the MSG graph driver
// (NotifyInputData), is optionally packetized and run through webrtc.org
// processing (Process), and is appended to mSources.  Also implements
// webrtc::VoEMediaProcess so VoE can hand back processed audio.
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
                                          public webrtc::VoEMediaProcess
{
  typedef MediaEngineAudioSource Super;
public:
  MediaEngineWebRTCMicrophoneSource(webrtc::VoiceEngine* aVoiceEnginePtr,
                                    mozilla::AudioInput* aAudioInput,
                                    int aIndex,
                                    const char* name,
                                    const char* uuid);

  void GetName(nsAString& aName) const override;
  void GetUUID(nsACString& aUUID) const override;

  nsresult Deallocate(AllocationHandle* aHandle) override;
  nsresult Start(SourceMediaStream* aStream,
                 TrackID aID,
                 const PrincipalHandle& aPrincipalHandle) override;
  nsresult Stop(SourceMediaStream* aSource, TrackID aID) override;
  nsresult Restart(AllocationHandle* aHandle,
                   const dom::MediaTrackConstraints& aConstraints,
                   const MediaEnginePrefs &aPrefs,
                   const nsString& aDeviceId,
                   const char** aOutBadConstraint) override;
  // Direct listeners are irrelevant for this source; intentionally a no-op.
  void SetDirectListeners(bool aHasDirectListeners) override {};

  void NotifyPull(MediaStreamGraph* aGraph,
                  SourceMediaStream* aSource,
                  TrackID aId,
                  StreamTime aDesiredTime,
                  const PrincipalHandle& aPrincipalHandle) override;

  // AudioDataListenerInterface methods
  // NOTE(review): these appear to be called from the graph/driver thread —
  // confirm against MediaStreamGraph before relying on that.
  void NotifyOutputData(MediaStreamGraph* aGraph,
                        AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override;
  void NotifyInputData(MediaStreamGraph* aGraph,
                       const AudioDataValue* aBuffer, size_t aFrames,
                       TrackRate aRate, uint32_t aChannels) override;

  void DeviceChanged() override;

  bool IsFake() override {
    return false;
  }

  dom::MediaSourceEnum GetMediaSource() const override {
    return dom::MediaSourceEnum::Microphone;
  }

  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
  {
    // Photos only make sense for video sources.
    return NS_ERROR_NOT_IMPLEMENTED;
  }

  uint32_t GetBestFitnessDistance(
      const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
      const nsString& aDeviceId) const override;

  // VoEMediaProcess.
  void Process(int channel, webrtc::ProcessingTypes type,
               int16_t audio10ms[], int length,
               int samplingFreq, bool isStereo) override;

  void Shutdown() override;

  NS_DECL_THREADSAFE_ISUPPORTS

protected:
  ~MediaEngineWebRTCMicrophoneSource() {}

private:
  nsresult
  UpdateSingleSource(const AllocationHandle* aHandle,
                     const NormalizedConstraints& aNetConstraints,
                     const MediaEnginePrefs& aPrefs,
                     const nsString& aDeviceId,
                     const char** aOutBadConstraint) override;

  // Remember the prefs that were applied, so redundant updates can be skipped.
  void SetLastPrefs(const MediaEnginePrefs& aPrefs);

  // These allocate/configure and release the channel
  bool AllocChannel();
  void FreeChannel();
  // These start/stop VoEBase and associated interfaces
  bool InitEngine();
  void DeInitEngine();

  // This is true when all processing is disabled, we can skip
  // packetization, resampling and other processing passes.
  bool PassThrough() {
    return mSkipProcessing;
  }
  // Appends aFrames of audio (any supported sample type T) to all streams
  // in mSources.
  template<typename T>
  void InsertInGraph(const T* aBuffer,
                     size_t aFrames,
                     uint32_t aChannels);

  // Splits incoming audio into 10ms packets and feeds them through the
  // webrtc.org processing pipeline (used when PassThrough() is false).
  void PacketizeAndProcess(MediaStreamGraph* aGraph,
                           const AudioDataValue* aBuffer,
                           size_t aFrames,
                           TrackRate aRate,
                           uint32_t aChannels);

  // Non-owning; lifetime presumably tied to MediaEngineWebRTC — TODO confirm.
  webrtc::VoiceEngine* mVoiceEngine;
  RefPtr<mozilla::AudioInput> mAudioInput;
  RefPtr<WebRTCAudioDataListener> mListener;

  // Note: shared across all microphone sources - we don't want to Terminate()
  // the VoEBase until there are no active captures
  static int sChannelsOpen;
  static ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
  static ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
  static ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
  static ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;

  // accessed from the GraphDriver thread except for deletion
  nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;

  // mMonitor protects mSources[] and mPrinicpalIds[] access/changes, and
  // transitions of mState from kStarted to kStopped (which are combined with
  // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
  // threads.
  Monitor mMonitor;
  nsTArray<RefPtr<SourceMediaStream>> mSources;
  nsTArray<PrincipalHandle> mPrincipalHandles; // Maps to mSources.

  // Device index for this microphone and the VoE channel in use.
  int mCapIndex;
  int mChannel;
  MOZ_INIT_OUTSIDE_CTOR TrackID mTrackID;
  bool mStarted;

  nsString mDeviceName;
  nsCString mDeviceUUID;

  int32_t mSampleFrequency;
  int32_t mPlayoutDelay;

  NullTransport *mNullTransport;

  // Scratch buffer for 16-bit conversion/processing.
  nsTArray<int16_t> mInputBuffer;
  // mSkipProcessing is true if none of the processing passes are enabled,
  // because of prefs or constraints. This allows simply copying the audio into
  // the MSG, skipping resampling and the whole webrtc.org code.
  bool mSkipProcessing;

  // To only update microphone when needed, we keep track of previous settings.
  MediaEnginePrefs mLastPrefs;
};
568 
// Top-level WebRTC media engine: enumerates camera/microphone devices and
// caches the per-device source objects.
class MediaEngineWebRTC : public MediaEngine
{
  typedef MediaEngine Super;
public:
  explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);

  virtual void SetFakeDeviceChangeEvents() override;

  // Clients should ensure to clean-up sources video/audio sources
  // before invoking Shutdown on this class.
  void Shutdown() override;

  // Returns whether the host supports duplex audio stream.
  bool SupportsDuplex();

  void EnumerateVideoDevices(dom::MediaSourceEnum,
                             nsTArray<RefPtr<MediaEngineVideoSource>>*) override;
  void EnumerateAudioDevices(dom::MediaSourceEnum,
                             nsTArray<RefPtr<MediaEngineAudioSource>>*) override;
private:
  ~MediaEngineWebRTC() {
    // Drop the far-end observer; its object is owned elsewhere —
    // TODO confirm who frees it.
    gFarendObserver = nullptr;
  }

  nsCOMPtr<nsIThread> mThread;

  // gUM runnables can e.g. Enumerate from multiple threads
  Mutex mMutex;
  // Raw pointer; created/destroyed by this engine — TODO confirm in the .cpp.
  webrtc::VoiceEngine* mVoiceEngine;
  webrtc::Config mConfig;
  RefPtr<mozilla::AudioInput> mAudioInput;
  bool mFullDuplex;
  bool mExtendedFilter;
  bool mDelayAgnostic;
  bool mHasTabVideoSource;

  // Store devices we've already seen in a hashtable for quick return.
  // Maps UUID to MediaEngineSource (one set for audio, one for video).
  nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
  nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
};
610 
611 }
612 
#endif /* MEDIAENGINEWEBRTC_H_ */
614