/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioDestinationNode.h"

#include "AlignmentUtils.h"
#include "AudibilityMonitor.h"
#include "AudioChannelService.h"
#include "AudioContext.h"
#include "AudioNodeEngine.h"
#include "AudioNodeTrack.h"
#include "CubebUtils.h"
#include "MediaTrackGraph.h"
#include "mozilla/StaticPrefs_dom.h"
#include "mozilla/dom/AudioDestinationNodeBinding.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/OfflineAudioCompletionEvent.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/dom/WakeLock.h"
#include "mozilla/dom/power/PowerManagerService.h"
#include "mozilla/Telemetry.h"
#include "mozilla/TelemetryHistogramEnums.h"
#include "nsContentUtils.h"
#include "nsIInterfaceRequestorUtils.h"
#include "nsIScriptObjectPrincipal.h"
#include "nsServiceManagerUtils.h"

extern mozilla::LazyLogModule gAudioChannelLog;

#define AUDIO_CHANNEL_LOG(msg, ...) \
  MOZ_LOG(gAudioChannelLog, LogLevel::Debug, (msg, ##__VA_ARGS__))

namespace mozilla::dom {

namespace {
class OnCompleteTask final : public Runnable {
 public:
  OnCompleteTask(AudioContext* aAudioContext, AudioBuffer* aRenderedBuffer)
      : Runnable("dom::OfflineDestinationNodeEngine::OnCompleteTask"),
        mAudioContext(aAudioContext),
        mRenderedBuffer(aRenderedBuffer) {}

  NS_IMETHOD Run() override {
    OfflineAudioCompletionEventInit param;
    param.mRenderedBuffer = mRenderedBuffer;

    RefPtr<OfflineAudioCompletionEvent> event =
        OfflineAudioCompletionEvent::Constructor(mAudioContext, u"complete"_ns,
                                                 param);
    mAudioContext->DispatchTrustedEvent(event);

    return NS_OK;
  }

 private:
  RefPtr<AudioContext> mAudioContext;
  RefPtr<AudioBuffer> mRenderedBuffer;
};
}  // anonymous namespace

class OfflineDestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit OfflineDestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mWriteIndex(0),
        mNumberOfChannels(aNode->ChannelCount()),
        mLength(aNode->Length()),
        mSampleRate(aNode->Context()->SampleRate()),
        mBufferAllocated(false) {}

  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    // Do this just for the sake of political correctness; this output
    // will not go anywhere.
    *aOutput = aInput;

    // The output buffer is allocated lazily, on the rendering thread, when
    // non-null input is received.
    if (!mBufferAllocated && !aInput.IsNull()) {
      // These allocations might fail if content provides a huge number of
      // channels or size, but it's OK since we'll deal with the failure
      // gracefully.
      mBuffer = ThreadSharedFloatArrayBufferList::Create(mNumberOfChannels,
                                                         mLength, fallible);
      if (mBuffer && mWriteIndex) {
        // Zero the leading frames for any null chunks that were skipped.
        for (uint32_t i = 0; i < mNumberOfChannels; ++i) {
          float* channelData = mBuffer->GetDataForWrite(i);
          PodZero(channelData, mWriteIndex);
        }
      }

      mBufferAllocated = true;
    }

    // Skip copying if there is no buffer.
    uint32_t outputChannelCount = mBuffer ? mNumberOfChannels : 0;

    // Record our input buffer
    MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?");
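    // Copy at most one WEBAUDIO_BLOCK_SIZE block, clamped to the frames
    // remaining before mLength so the final partial block is not overrun.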
    const uint32_t duration =
        std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex);
    const uint32_t inputChannelCount = aInput.ChannelCount();
    for (uint32_t i = 0; i < outputChannelCount; ++i) {
      float* outputData = mBuffer->GetDataForWrite(i) + mWriteIndex;
      if (aInput.IsNull() || i >= inputChannelCount) {
        PodZero(outputData, duration);
      } else {
        const float* inputBuffer =
            static_cast<const float*>(aInput.mChannelData[i]);
        if (duration == WEBAUDIO_BLOCK_SIZE && IS_ALIGNED16(inputBuffer)) {
          // Use the optimized version of the copy with scale operation
          AudioBlockCopyChannelWithScale(inputBuffer, aInput.mVolume,
                                         outputData);
        } else {
          if (aInput.mVolume == 1.0f) {
            PodCopy(outputData, inputBuffer, duration);
          } else {
            for (uint32_t j = 0; j < duration; ++j) {
              outputData[j] = aInput.mVolume * inputBuffer[j];
            }
          }
        }
      }
    }
    mWriteIndex += duration;

    if (mWriteIndex >= mLength) {
      NS_ASSERTION(mWriteIndex == mLength, "Overshot length");
      // Go to finished state. When the graph's current time eventually reaches
      // the end of the track, then the main thread will be notified and we'll
      // shut down the AudioContext.
      *aFinished = true;
    }
  }

  bool IsActive() const override {
    // Keep processing to track the track's time, which is used for all
    // timelines associated with the same AudioContext.
    return true;
  }

  already_AddRefed<AudioBuffer> CreateAudioBuffer(AudioContext* aContext) {
    MOZ_ASSERT(NS_IsMainThread());
    // Create the rendered buffer from the input recorded in mBuffer.
    ErrorResult rv;
    RefPtr<AudioBuffer> renderedBuffer =
        AudioBuffer::Create(aContext->GetOwner(), mNumberOfChannels, mLength,
                            mSampleRate, mBuffer.forget(), rv);
    if (rv.Failed()) {
      rv.SuppressException();
      return nullptr;
    }

    return renderedBuffer.forget();
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mBuffer) {
      amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }
    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // The input to the destination node is recorded in mBuffer.
  // When this buffer fills up with mLength frames, the buffered input is sent
  // to the main thread in order to dispatch OfflineAudioCompletionEvent.
  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  // An index representing the next offset in mBuffer to be written to.
  uint32_t mWriteIndex;
  uint32_t mNumberOfChannels;
  // How many frames the OfflineAudioContext intends to produce.
  uint32_t mLength;
  float mSampleRate;
  bool mBufferAllocated;
};

class DestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit DestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mSampleRate(CubebUtils::PreferredSampleRate()),
        mVolume(1.0f),
        mAudibilityMonitor(
            mSampleRate,
            StaticPrefs::dom_media_silence_duration_for_audibility()),
        mSuspended(false),
        mIsAudible(false) {
    MOZ_ASSERT(aNode);
  }

  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    *aOutput = aInput;
    aOutput->mVolume *= mVolume;

    if (mSuspended) {
      return;
    }

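    // Feed the block to the audibility monitor; the destination counts as
    // audible only when recent input was non-silent and the effective output
    // volume is non-zero.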
    mAudibilityMonitor.Process(aInput);
    bool isAudible =
        mAudibilityMonitor.RecentlyAudible() && aOutput->mVolume > 0.0;
    if (isAudible != mIsAudible) {
      mIsAudible = isAudible;
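      // Notify the main-thread AudioDestinationNode so it can update the
      // audio channel agent and the wake lock.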
      RefPtr<AudioNodeTrack> track = aTrack;
      auto r = [track, isAudible]() -> void {
        MOZ_ASSERT(NS_IsMainThread());
        RefPtr<AudioNode> node = track->Engine()->NodeMainThread();
        if (node) {
          RefPtr<AudioDestinationNode> destinationNode =
              static_cast<AudioDestinationNode*>(node.get());
          destinationNode->NotifyDataAudibleStateChanged(isAudible);
        }
      };

      aTrack->Graph()->DispatchToMainThreadStableState(NS_NewRunnableFunction(
          "dom::WebAudioAudibleStateChangedRunnable", r));
    }
  }

  bool IsActive() const override {
    // Keep processing to track the track's time, which is used for all
    // timelines associated with the same AudioContext.  If there are no other
    // engines for the AudioContext, then this could return false to suspend
    // the track, but the track is blocked anyway through
    // AudioDestinationNode::SetIsOnlyNodeForContext().
    return true;
  }

  void SetDoubleParameter(uint32_t aIndex, double aParam) override {
    if (aIndex == VOLUME) {
      mVolume = static_cast<float>(aParam);
    }
  }

  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
    if (aIndex == SUSPENDED) {
      mSuspended = !!aParam;
      if (mSuspended) {
        mIsAudible = false;
      }
    }
  }

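  // Parameter indices used by the main thread via SendDoubleParameterToTrack()
  // and SendInt32ParameterToTrack() on AudioDestinationNode.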
  enum Parameters {
    VOLUME,
    SUSPENDED,
  };

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  int mSampleRate;
  float mVolume;
  AudibilityMonitor mAudibilityMonitor;
  bool mSuspended;
  bool mIsAudible;
};

NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationNode, AudioNode,
                                   mAudioChannelAgent, mOfflineRenderingPromise)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioDestinationNode)
  NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)

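// Flags shared by the realtime and offline destination tracks created below.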
const AudioNodeTrack::Flags kTrackFlags =
    AudioNodeTrack::NEED_MAIN_THREAD_CURRENT_TIME |
    AudioNodeTrack::NEED_MAIN_THREAD_ENDED | AudioNodeTrack::EXTERNAL_OUTPUT;

AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                           bool aIsOffline,
                                           uint32_t aNumberOfChannels,
                                           uint32_t aLength)
    : AudioNode(aContext, aNumberOfChannels, ChannelCountMode::Explicit,
                ChannelInterpretation::Speakers),
      mFramesToProduce(aLength),
      mIsOffline(aIsOffline),
      mCreatedTime(TimeStamp::Now()) {
  if (aIsOffline) {
    // The track is created on demand to avoid creating a graph thread that
    // may not be used.
    return;
  }

  // GetParentObject can return nullptr here. This will end up creating another
  // MediaTrackGraph.
  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
      MediaTrackGraph::AUDIO_THREAD_DRIVER, aContext->GetParentObject(),
      aContext->SampleRate(), MediaTrackGraph::DEFAULT_OUTPUT_DEVICE);
  AudioNodeEngine* engine = new DestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(aContext, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);
  // null key is fine: only one output per mTrack
  mTrack->AddAudioOutput(nullptr);
}

void AudioDestinationNode::Init() {
  // We don't do this in the ctor because the AudioContext must already hold a
  // strong reference to the destination node. If it doesn't, initializing the
  // agent would cause an unexpected destruction of the destination node when
  // the local weak reference inside `InitWithWeakCallback()` is destroyed.
  if (!mIsOffline) {
    CreateAndStartAudioChannelAgent();
  }
}

void AudioDestinationNode::Close() {
  DestroyAudioChannelAgentIfExists();
  ReleaseAudioWakeLockIfExists();
}

void AudioDestinationNode::CreateAndStartAudioChannelAgent() {
  MOZ_ASSERT(!mIsOffline);
  MOZ_ASSERT(!mAudioChannelAgent);

  AudioChannelAgent* agent = new AudioChannelAgent();
  nsresult rv = agent->InitWithWeakCallback(GetOwner(), this);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    AUDIO_CHANNEL_LOG("Failed to init audio channel agent");
    return;
  }

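  // Report our current audible state to the audio channel service when
  // registering as a playing agent.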
  AudibleState state =
      IsAudible() ? AudibleState::eAudible : AudibleState::eNotAudible;
  rv = agent->NotifyStartedPlaying(state);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    AUDIO_CHANNEL_LOG("Failed to start audio channel agent");
    return;
  }

  mAudioChannelAgent = agent;
  mAudioChannelAgent->PullInitialUpdate();
}

AudioDestinationNode::~AudioDestinationNode() {
  MOZ_ASSERT(!mAudioChannelAgent);
  MOZ_ASSERT(!mWakeLock);
  MOZ_ASSERT(!mCaptureTrackPort);
}

size_t AudioDestinationNode::SizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  // Might be useful in the future:
  // - mAudioChannelAgent
  return amount;
}

size_t AudioDestinationNode::SizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

AudioNodeTrack* AudioDestinationNode::Track() {
  if (mTrack) {
    return mTrack;
  }

  AudioContext* context = Context();
  if (!context) {  // This node has been unlinked.
    return nullptr;
  }

  MOZ_ASSERT(mIsOffline, "Realtime tracks are created in constructor");

  // GetParentObject can return nullptr here when the document has been
  // unlinked.
  MediaTrackGraph* graph = MediaTrackGraph::CreateNonRealtimeInstance(
      context->SampleRate(), context->GetParentObject());
  AudioNodeEngine* engine = new OfflineDestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(context, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);

  return mTrack;
}

void AudioDestinationNode::DestroyAudioChannelAgentIfExists() {
  if (mAudioChannelAgent) {
    mAudioChannelAgent->NotifyStoppedPlaying();
    mAudioChannelAgent = nullptr;
    if (IsCapturingAudio()) {
      StopAudioCapturingTrack();
    }
  }
}

void AudioDestinationNode::DestroyMediaTrack() {
  Close();
  if (!mTrack) {
    return;
  }

  Context()->ShutdownWorklet();

  mTrack->RemoveMainThreadListener(this);
  AudioNode::DestroyMediaTrack();
}

void AudioDestinationNode::NotifyMainThreadTrackEnded() {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mTrack->IsEnded());

  if (mIsOffline && GetAbstractMainThread()) {
    GetAbstractMainThread()->Dispatch(NewRunnableMethod(
        "dom::AudioDestinationNode::FireOfflineCompletionEvent", this,
        &AudioDestinationNode::FireOfflineCompletionEvent));
  }
}

void AudioDestinationNode::FireOfflineCompletionEvent() {
  AudioContext* context = Context();
  context->OfflineClose();

  OfflineDestinationNodeEngine* engine =
      static_cast<OfflineDestinationNodeEngine*>(Track()->Engine());
  RefPtr<AudioBuffer> renderedBuffer = engine->CreateAudioBuffer(context);
  if (!renderedBuffer) {
    return;
  }
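  // Resolve the promise returned by startRendering() first, then fire the
  // "complete" event carrying the same rendered buffer.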
  ResolvePromise(renderedBuffer);

  context->Dispatch(do_AddRef(new OnCompleteTask(context, renderedBuffer)));

  context->OnStateChanged(nullptr, AudioContextState::Closed);

  mOfflineRenderingRef.Drop(this);
}

void AudioDestinationNode::ResolvePromise(AudioBuffer* aRenderedBuffer) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mIsOffline);
  mOfflineRenderingPromise->MaybeResolve(aRenderedBuffer);
}

uint32_t AudioDestinationNode::MaxChannelCount() const {
  return Context()->MaxChannelCount();
}

void AudioDestinationNode::SetChannelCount(uint32_t aChannelCount,
                                           ErrorResult& aRv) {
  if (aChannelCount > MaxChannelCount()) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is larger than maxChannelCount", aChannelCount));
    return;
  }

  if (aChannelCount == ChannelCount()) {
    return;
  }

  AudioNode::SetChannelCount(aChannelCount, aRv);
}

void AudioDestinationNode::Mute() {
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 0.0f);
}

void AudioDestinationNode::Unmute() {
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 1.0f);
}

void AudioDestinationNode::Suspend() {
  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 1);
}

void AudioDestinationNode::Resume() {
  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 0);
}

void AudioDestinationNode::NotifyAudioContextStateChanged() {
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
}

void AudioDestinationNode::OfflineShutdown() {
  MOZ_ASSERT(Context() && Context()->IsOffline(),
             "Should only be called on a valid OfflineAudioContext");

  mOfflineRenderingRef.Drop(this);
}

JSObject* AudioDestinationNode::WrapObject(JSContext* aCx,
                                           JS::Handle<JSObject*> aGivenProto) {
  return AudioDestinationNode_Binding::Wrap(aCx, this, aGivenProto);
}

void AudioDestinationNode::StartRendering(Promise* aPromise) {
  mOfflineRenderingPromise = aPromise;
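  // Keep this node alive until offline rendering completes; the reference is
  // dropped in FireOfflineCompletionEvent() or OfflineShutdown().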
  mOfflineRenderingRef.Take(this);
  Track()->Graph()->StartNonRealtimeProcessing(mFramesToProduce);
}

NS_IMETHODIMP
AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p WindowVolumeChanged, "
      "aVolume = %f, aMuted = %s\n",
      this, aVolume, aMuted ? "true" : "false");

  mAudioChannelVolume = aMuted ? 0.0f : aVolume;
  mTrack->SetAudioOutputVolume(nullptr, mAudioChannelVolume);
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eVolumeChanged);
  return NS_OK;
}

NS_IMETHODIMP
AudioDestinationNode::WindowSuspendChanged(nsSuspendedTypes aSuspend) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  const bool shouldDisable = aSuspend == nsISuspendedTypes::SUSPENDED_BLOCK;
  if (mAudioChannelDisabled == shouldDisable) {
    return NS_OK;
  }
  mAudioChannelDisabled = shouldDisable;

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p WindowSuspendChanged, shouldDisable = %d\n",
      this, mAudioChannelDisabled);

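  // A blocked window silences the track's output without destroying it, so
  // normal output can be restored when the window is unblocked.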
  DisabledTrackMode disabledMode = mAudioChannelDisabled
                                       ? DisabledTrackMode::SILENCE_BLACK
                                       : DisabledTrackMode::ENABLED;
  mTrack->SetDisabledTrackMode(disabledMode);
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
  return NS_OK;
}

NS_IMETHODIMP
AudioDestinationNode::WindowAudioCaptureChanged(bool aCapture) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  nsCOMPtr<nsPIDOMWindowInner> ownerWindow = GetOwner();
  if (!ownerWindow) {
    return NS_OK;
  }

  if (aCapture == IsCapturingAudio()) {
    return NS_OK;
  }

  if (aCapture) {
    StartAudioCapturingTrack();
  } else {
    StopAudioCapturingTrack();
  }

  return NS_OK;
}

bool AudioDestinationNode::IsCapturingAudio() const {
  return mCaptureTrackPort != nullptr;
}

void AudioDestinationNode::StartAudioCapturingTrack() {
  MOZ_ASSERT(!IsCapturingAudio());
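  // Route this track's output into the owning window's audio capture track.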
  nsCOMPtr<nsPIDOMWindowInner> window = Context()->GetParentObject();
  uint64_t id = window->WindowID();
  mCaptureTrackPort = mTrack->Graph()->ConnectToCaptureTrack(id, mTrack);
}

void AudioDestinationNode::StopAudioCapturingTrack() {
  MOZ_ASSERT(IsCapturingAudio());
  mCaptureTrackPort->Destroy();
  mCaptureTrackPort = nullptr;
}

void AudioDestinationNode::CreateAudioWakeLockIfNeeded() {
  if (!mWakeLock && IsAudible()) {
    RefPtr<power::PowerManagerService> pmService =
        power::PowerManagerService::GetInstance();
    NS_ENSURE_TRUE_VOID(pmService);

    ErrorResult rv;
    mWakeLock = pmService->NewWakeLock(u"audio-playing"_ns, GetOwner(), rv);
  }
}

void AudioDestinationNode::ReleaseAudioWakeLockIfExists() {
  if (mWakeLock) {
    IgnoredErrorResult rv;
    mWakeLock->Unlock(rv);
    mWakeLock = nullptr;
  }
}

void AudioDestinationNode::NotifyDataAudibleStateChanged(bool aAudible) {
  MOZ_ASSERT(!mIsOffline);

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p NotifyDataAudibleStateChanged, audible=%d", this,
      aAudible);

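  // On the first transition to audible, record how long after creation the
  // destination took to become audible.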
  if (mDurationBeforeFirstTimeAudible.IsZero()) {
    MOZ_ASSERT(aAudible);
    mDurationBeforeFirstTimeAudible = TimeStamp::Now() - mCreatedTime;
    Telemetry::Accumulate(Telemetry::WEB_AUDIO_BECOMES_AUDIBLE_TIME,
                          mDurationBeforeFirstTimeAudible.ToSeconds());
  }

  mIsDataAudible = aAudible;
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eDataAudibleChanged);
}

void AudioDestinationNode::UpdateFinalAudibleStateIfNeeded(
    AudibleChangedReasons aReason) {
  // The audio context has been closed and we've destroyed the agent.
  if (!mAudioChannelAgent) {
    return;
  }
  const bool newAudibleState = IsAudible();
  if (mFinalAudibleState == newAudibleState) {
    return;
  }
  AUDIO_CHANNEL_LOG("AudioDestinationNode %p Final audible state=%d", this,
                    newAudibleState);
  mFinalAudibleState = newAudibleState;
  AudibleState state =
      mFinalAudibleState ? AudibleState::eAudible : AudibleState::eNotAudible;
  mAudioChannelAgent->NotifyStartedAudible(state, aReason);
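  // Hold an "audio-playing" wake lock only while the destination is audible.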
  if (mFinalAudibleState) {
    CreateAudioWakeLockIfNeeded();
  } else {
    ReleaseAudioWakeLockIfExists();
  }
}

bool AudioDestinationNode::IsAudible() const {
  // The destination node is regarded as audible if all of the following
  // conditions are true:
  // (1) data audible state : both audio input and output are audible
  // (2) window audible state : the tab isn't muted by the tab sound indicator
  // (3) audio context state : the audio context should be running
  return Context()->State() == AudioContextState::Running && mIsDataAudible &&
         mAudioChannelVolume != 0.0;
}

}  // namespace mozilla::dom