/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioDestinationNode.h"

#include "AlignmentUtils.h"
#include "AudibilityMonitor.h"
#include "AudioChannelService.h"
#include "AudioContext.h"
#include "AudioNodeEngine.h"
#include "AudioNodeTrack.h"
#include "CubebUtils.h"
#include "MediaTrackGraph.h"
#include "mozilla/StaticPrefs_dom.h"
#include "mozilla/dom/AudioDestinationNodeBinding.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/OfflineAudioCompletionEvent.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/dom/WakeLock.h"
#include "mozilla/dom/power/PowerManagerService.h"
#include "mozilla/Telemetry.h"
#include "mozilla/TelemetryHistogramEnums.h"
#include "nsContentUtils.h"
#include "nsIInterfaceRequestorUtils.h"
#include "nsIScriptObjectPrincipal.h"
#include "nsServiceManagerUtils.h"

extern mozilla::LazyLogModule gAudioChannelLog;

#define AUDIO_CHANNEL_LOG(msg, ...) \
  MOZ_LOG(gAudioChannelLog, LogLevel::Debug, (msg, ##__VA_ARGS__))

namespace mozilla::dom {

namespace {
class OnCompleteTask final : public Runnable {
 public:
  OnCompleteTask(AudioContext* aAudioContext, AudioBuffer* aRenderedBuffer)
      : Runnable("dom::OfflineDestinationNodeEngine::OnCompleteTask"),
        mAudioContext(aAudioContext),
        mRenderedBuffer(aRenderedBuffer) {}

  NS_IMETHOD Run() override {
    OfflineAudioCompletionEventInit param;
    param.mRenderedBuffer = mRenderedBuffer;

    RefPtr<OfflineAudioCompletionEvent> event =
        OfflineAudioCompletionEvent::Constructor(mAudioContext, u"complete"_ns,
                                                 param);
    mAudioContext->DispatchTrustedEvent(event);

    return NS_OK;
  }

 private:
  RefPtr<AudioContext> mAudioContext;
  RefPtr<AudioBuffer> mRenderedBuffer;
};
}  // anonymous namespace

class OfflineDestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit OfflineDestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mWriteIndex(0),
        mNumberOfChannels(aNode->ChannelCount()),
        mLength(aNode->Length()),
        mSampleRate(aNode->Context()->SampleRate()),
        mBufferAllocated(false) {}

  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    // Do this just for the sake of political correctness; this output
    // will not go anywhere.
    *aOutput = aInput;

    // The output buffer is allocated lazily, on the rendering thread, when
    // non-null input is received.
    if (!mBufferAllocated && !aInput.IsNull()) {
      // These allocations might fail if content provides a huge number of
      // channels or size, but it's OK since we'll deal with the failure
      // gracefully.
      mBuffer = ThreadSharedFloatArrayBufferList::Create(mNumberOfChannels,
                                                         mLength, fallible);
      if (mBuffer && mWriteIndex) {
        // Zero the leading samples for any null chunks that were skipped.
        for (uint32_t i = 0; i < mNumberOfChannels; ++i) {
          float* channelData = mBuffer->GetDataForWrite(i);
          PodZero(channelData, mWriteIndex);
        }
      }

      mBufferAllocated = true;
    }

    // Skip copying if there is no buffer.
    uint32_t outputChannelCount = mBuffer ? mNumberOfChannels : 0;

    // Record our input buffer
    MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?");
    const uint32_t duration =
        std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex);
    const uint32_t inputChannelCount = aInput.ChannelCount();
    for (uint32_t i = 0; i < outputChannelCount; ++i) {
      float* outputData = mBuffer->GetDataForWrite(i) + mWriteIndex;
      if (aInput.IsNull() || i >= inputChannelCount) {
        PodZero(outputData, duration);
      } else {
        const float* inputBuffer =
            static_cast<const float*>(aInput.mChannelData[i]);
        if (duration == WEBAUDIO_BLOCK_SIZE && IS_ALIGNED16(inputBuffer)) {
          // Use the optimized version of the copy with scale operation
          AudioBlockCopyChannelWithScale(inputBuffer, aInput.mVolume,
                                         outputData);
        } else {
          if (aInput.mVolume == 1.0f) {
            PodCopy(outputData, inputBuffer, duration);
          } else {
            for (uint32_t j = 0; j < duration; ++j) {
              outputData[j] = aInput.mVolume * inputBuffer[j];
            }
          }
        }
      }
    }
    mWriteIndex += duration;

    if (mWriteIndex >= mLength) {
      NS_ASSERTION(mWriteIndex == mLength, "Overshot length");
      // Go to finished state. When the graph's current time eventually reaches
      // the end of the track, then the main thread will be notified and we'll
      // shut down the AudioContext.
      *aFinished = true;
    }
  }

  bool IsActive() const override {
    // Keep processing so that track time, which is used for all timelines
    // associated with the same AudioContext, keeps advancing.
    return true;
  }

  already_AddRefed<AudioBuffer> CreateAudioBuffer(AudioContext* aContext) {
    MOZ_ASSERT(NS_IsMainThread());
    // Create the rendered buffer from the recorded input.
    ErrorResult rv;
    RefPtr<AudioBuffer> renderedBuffer =
        AudioBuffer::Create(aContext->GetOwner(), mNumberOfChannels, mLength,
                            mSampleRate, mBuffer.forget(), rv);
    if (rv.Failed()) {
      rv.SuppressException();
      return nullptr;
    }

    return renderedBuffer.forget();
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mBuffer) {
      amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }
    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // The input to the destination node is recorded in mBuffer.
  // When this buffer fills up with mLength frames, the buffered input is sent
  // to the main thread in order to dispatch OfflineAudioCompletionEvent.
  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  // An index representing the next offset in mBuffer to be written to.
  uint32_t mWriteIndex;
  uint32_t mNumberOfChannels;
  // How many frames the OfflineAudioContext intends to produce.
  uint32_t mLength;
  float mSampleRate;
  bool mBufferAllocated;
};

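// Engine for realtime (non-offline) destinations: it copies its input to its
// output, applies the volume set from the main thread, and reports changes in
// the data-audible state back to the AudioDestinationNode on the main thread.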
class DestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit DestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mSampleRate(CubebUtils::PreferredSampleRate()),
        mVolume(1.0f),
        mAudibilityMonitor(
            mSampleRate,
            StaticPrefs::dom_media_silence_duration_for_audibility()),
        mSuspended(false),
        mIsAudible(false) {
    MOZ_ASSERT(aNode);
  }

  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    *aOutput = aInput;
    aOutput->mVolume *= mVolume;

    if (mSuspended) {
      return;
    }

    mAudibilityMonitor.Process(aInput);
    bool isAudible =
        mAudibilityMonitor.RecentlyAudible() && aOutput->mVolume > 0.0;
    if (isAudible != mIsAudible) {
      mIsAudible = isAudible;
      RefPtr<AudioNodeTrack> track = aTrack;
      auto r = [track, isAudible]() -> void {
        MOZ_ASSERT(NS_IsMainThread());
        RefPtr<AudioNode> node = track->Engine()->NodeMainThread();
        if (node) {
          RefPtr<AudioDestinationNode> destinationNode =
              static_cast<AudioDestinationNode*>(node.get());
          destinationNode->NotifyDataAudibleStateChanged(isAudible);
        }
      };

      aTrack->Graph()->DispatchToMainThreadStableState(NS_NewRunnableFunction(
          "dom::WebAudioAudibleStateChangedRunnable", r));
    }
  }

  bool IsActive() const override {
    // Keep processing so that track time, which is used for all timelines
    // associated with the same AudioContext, keeps advancing.  If there are
    // no other engines for the AudioContext, then this could return false to
    // suspend the track, but the track is blocked anyway through
    // AudioDestinationNode::SetIsOnlyNodeForContext().
    return true;
  }

  void SetDoubleParameter(uint32_t aIndex, double aParam) override {
    if (aIndex == VOLUME) {
      mVolume = static_cast<float>(aParam);
    }
  }

  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
    if (aIndex == SUSPENDED) {
      mSuspended = !!aParam;
      if (mSuspended) {
        mIsAudible = false;
      }
    }
  }

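  // Parameter indices used by AudioDestinationNode via
  // SendDoubleParameterToTrack() (VOLUME) and SendInt32ParameterToTrack()
  // (SUSPENDED); see Mute()/Unmute() and Suspend()/Resume() below.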
  enum Parameters {
    VOLUME,
    SUSPENDED,
  };

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  int mSampleRate;
  float mVolume;
  AudibilityMonitor mAudibilityMonitor;
  bool mSuspended;
  bool mIsAudible;
};

NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationNode, AudioNode,
                                   mAudioChannelAgent, mOfflineRenderingPromise)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioDestinationNode)
  NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)

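// Flags shared by the realtime and offline destination tracks: the main
// thread needs current-time updates and end-of-track notification, and the
// track is marked as producing external output.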
const AudioNodeTrack::Flags kTrackFlags =
    AudioNodeTrack::NEED_MAIN_THREAD_CURRENT_TIME |
    AudioNodeTrack::NEED_MAIN_THREAD_ENDED | AudioNodeTrack::EXTERNAL_OUTPUT;

AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                           bool aIsOffline,
                                           uint32_t aNumberOfChannels,
                                           uint32_t aLength)
    : AudioNode(aContext, aNumberOfChannels, ChannelCountMode::Explicit,
                ChannelInterpretation::Speakers),
      mFramesToProduce(aLength),
      mIsOffline(aIsOffline),
      mCreatedTime(TimeStamp::Now()) {
  if (aIsOffline) {
    // The track is created on demand to avoid creating a graph thread that
    // may not be used.
    return;
  }

  // GetParentObject can return nullptr here. This will end up creating another
  // MediaTrackGraph.
  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
      MediaTrackGraph::AUDIO_THREAD_DRIVER, aContext->GetParentObject(),
      aContext->SampleRate(), MediaTrackGraph::DEFAULT_OUTPUT_DEVICE);
  AudioNodeEngine* engine = new DestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(aContext, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);
  // null key is fine: only one output per mTrack
  mTrack->AddAudioOutput(nullptr);
}

void AudioDestinationNode::Init() {
  // The audio channel agent is not created in the constructor because the
  // AudioContext must already hold a strong reference to the destination
  // node. Otherwise, initializing the agent would unexpectedly destroy the
  // destination node when the local weak reference inside
  // `InitWithWeakCallback()` is destroyed.
  if (!mIsOffline) {
    CreateAndStartAudioChannelAgent();
  }
}

void AudioDestinationNode::Close() {
  DestroyAudioChannelAgentIfExists();
  ReleaseAudioWakeLockIfExists();
}

void AudioDestinationNode::CreateAndStartAudioChannelAgent() {
  MOZ_ASSERT(!mIsOffline);
  MOZ_ASSERT(!mAudioChannelAgent);

  AudioChannelAgent* agent = new AudioChannelAgent();
  nsresult rv = agent->InitWithWeakCallback(GetOwner(), this);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    AUDIO_CHANNEL_LOG("Failed to init audio channel agent");
    return;
  }

  AudibleState state =
      IsAudible() ? AudibleState::eAudible : AudibleState::eNotAudible;
  rv = agent->NotifyStartedPlaying(state);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    AUDIO_CHANNEL_LOG("Failed to start audio channel agent");
    return;
  }

  mAudioChannelAgent = agent;
  mAudioChannelAgent->PullInitialUpdate();
}

AudioDestinationNode::~AudioDestinationNode() {
  MOZ_ASSERT(!mAudioChannelAgent);
  MOZ_ASSERT(!mWakeLock);
  MOZ_ASSERT(!mCaptureTrackPort);
}

size_t AudioDestinationNode::SizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  // Might be useful in the future:
  // - mAudioChannelAgent
  return amount;
}

size_t AudioDestinationNode::SizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

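// Returns this node's AudioNodeTrack. Realtime tracks are created in the
// constructor; for offline contexts the track (and its non-realtime graph) is
// created lazily here, on first use.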
AudioNodeTrack* AudioDestinationNode::Track() {
  if (mTrack) {
    return mTrack;
  }

  AudioContext* context = Context();
  if (!context) {  // This node has been unlinked.
    return nullptr;
  }

  MOZ_ASSERT(mIsOffline, "Realtime tracks are created in constructor");

  // GetParentObject can return nullptr here when the document has been
  // unlinked.
  MediaTrackGraph* graph = MediaTrackGraph::CreateNonRealtimeInstance(
      context->SampleRate(), context->GetParentObject());
  AudioNodeEngine* engine = new OfflineDestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(context, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);

  return mTrack;
}

void AudioDestinationNode::DestroyAudioChannelAgentIfExists() {
  if (mAudioChannelAgent) {
    mAudioChannelAgent->NotifyStoppedPlaying();
    mAudioChannelAgent = nullptr;
    if (IsCapturingAudio()) {
      StopAudioCapturingTrack();
    }
  }
}

void AudioDestinationNode::DestroyMediaTrack() {
  Close();
  if (!mTrack) {
    return;
  }

  Context()->ShutdownWorklet();

  mTrack->RemoveMainThreadListener(this);
  AudioNode::DestroyMediaTrack();
}

void AudioDestinationNode::NotifyMainThreadTrackEnded() {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mTrack->IsEnded());

  if (mIsOffline && GetAbstractMainThread()) {
    GetAbstractMainThread()->Dispatch(NewRunnableMethod(
        "dom::AudioDestinationNode::FireOfflineCompletionEvent", this,
        &AudioDestinationNode::FireOfflineCompletionEvent));
  }
}

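// Runs on the main thread once the offline track has ended: closes the
// context, resolves mOfflineRenderingPromise with the rendered buffer, and
// dispatches the "complete" OfflineAudioCompletionEvent.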
void AudioDestinationNode::FireOfflineCompletionEvent() {
  AudioContext* context = Context();
  context->OfflineClose();

  OfflineDestinationNodeEngine* engine =
      static_cast<OfflineDestinationNodeEngine*>(Track()->Engine());
  RefPtr<AudioBuffer> renderedBuffer = engine->CreateAudioBuffer(context);
  if (!renderedBuffer) {
    return;
  }
  ResolvePromise(renderedBuffer);

  context->Dispatch(do_AddRef(new OnCompleteTask(context, renderedBuffer)));

  context->OnStateChanged(nullptr, AudioContextState::Closed);

  mOfflineRenderingRef.Drop(this);
}

void AudioDestinationNode::ResolvePromise(AudioBuffer* aRenderedBuffer) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mIsOffline);
  mOfflineRenderingPromise->MaybeResolve(aRenderedBuffer);
}

uint32_t AudioDestinationNode::MaxChannelCount() const {
  return Context()->MaxChannelCount();
}

void AudioDestinationNode::SetChannelCount(uint32_t aChannelCount,
                                           ErrorResult& aRv) {
  if (aChannelCount > MaxChannelCount()) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is larger than maxChannelCount", aChannelCount));
    return;
  }

  if (aChannelCount == ChannelCount()) {
    return;
  }

  AudioNode::SetChannelCount(aChannelCount, aRv);
}

void AudioDestinationNode::Mute() {
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 0.0f);
}

void AudioDestinationNode::Unmute() {
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 1.0f);
}

void AudioDestinationNode::Suspend() {
  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 1);
}

void AudioDestinationNode::Resume() {
  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 0);
}

void AudioDestinationNode::NotifyAudioContextStateChanged() {
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
}

void AudioDestinationNode::OfflineShutdown() {
  MOZ_ASSERT(Context() && Context()->IsOffline(),
             "Should only be called on a valid OfflineAudioContext");

  mOfflineRenderingRef.Drop(this);
}

JSObject* AudioDestinationNode::WrapObject(JSContext* aCx,
                                           JS::Handle<JSObject*> aGivenProto) {
  return AudioDestinationNode_Binding::Wrap(aCx, this, aGivenProto);
}

void AudioDestinationNode::StartRendering(Promise* aPromise) {
  mOfflineRenderingPromise = aPromise;
  mOfflineRenderingRef.Take(this);
  Track()->Graph()->StartNonRealtimeProcessing(mFramesToProduce);
}

NS_IMETHODIMP
AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p WindowVolumeChanged, "
      "aVolume = %f, aMuted = %s\n",
      this, aVolume, aMuted ? "true" : "false");

  mAudioChannelVolume = aMuted ? 0.0f : aVolume;
  mTrack->SetAudioOutputVolume(nullptr, mAudioChannelVolume);
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eVolumeChanged);
  return NS_OK;
}

NS_IMETHODIMP
AudioDestinationNode::WindowSuspendChanged(nsSuspendedTypes aSuspend) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  const bool shouldDisable = aSuspend == nsISuspendedTypes::SUSPENDED_BLOCK;
  if (mAudioChannelDisabled == shouldDisable) {
    return NS_OK;
  }
  mAudioChannelDisabled = shouldDisable;

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p WindowSuspendChanged, shouldDisable = %d\n",
      this, mAudioChannelDisabled);

  DisabledTrackMode disabledMode = mAudioChannelDisabled
                                       ? DisabledTrackMode::SILENCE_BLACK
                                       : DisabledTrackMode::ENABLED;
  mTrack->SetDisabledTrackMode(disabledMode);
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
  return NS_OK;
}

NS_IMETHODIMP
AudioDestinationNode::WindowAudioCaptureChanged(bool aCapture) {
  MOZ_ASSERT(mAudioChannelAgent);
  if (!mTrack) {
    return NS_OK;
  }

  nsCOMPtr<nsPIDOMWindowInner> ownerWindow = GetOwner();
  if (!ownerWindow) {
    return NS_OK;
  }

  if (aCapture == IsCapturingAudio()) {
    return NS_OK;
  }

  if (aCapture) {
    StartAudioCapturingTrack();
  } else {
    StopAudioCapturingTrack();
  }

  return NS_OK;
}

bool AudioDestinationNode::IsCapturingAudio() const {
  return mCaptureTrackPort != nullptr;
}

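// Connects this node's track to the window's audio capture track; called from
// WindowAudioCaptureChanged() when the window starts capturing audio.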
void AudioDestinationNode::StartAudioCapturingTrack() {
  MOZ_ASSERT(!IsCapturingAudio());
  nsCOMPtr<nsPIDOMWindowInner> window = Context()->GetParentObject();
  uint64_t id = window->WindowID();
  mCaptureTrackPort = mTrack->Graph()->ConnectToCaptureTrack(id, mTrack);
}

void AudioDestinationNode::StopAudioCapturingTrack() {
  MOZ_ASSERT(IsCapturingAudio());
  mCaptureTrackPort->Destroy();
  mCaptureTrackPort = nullptr;
}

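// Holds an "audio-playing" wake lock while the destination is audible; the
// lock is released again in ReleaseAudioWakeLockIfExists() when audibility is
// lost or the node is closed.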
void AudioDestinationNode::CreateAudioWakeLockIfNeeded() {
  if (!mWakeLock && IsAudible()) {
    RefPtr<power::PowerManagerService> pmService =
        power::PowerManagerService::GetInstance();
    NS_ENSURE_TRUE_VOID(pmService);

    ErrorResult rv;
    mWakeLock = pmService->NewWakeLock(u"audio-playing"_ns, GetOwner(), rv);
  }
}

void AudioDestinationNode::ReleaseAudioWakeLockIfExists() {
  if (mWakeLock) {
    IgnoredErrorResult rv;
    mWakeLock->Unlock(rv);
    mWakeLock = nullptr;
  }
}

void AudioDestinationNode::NotifyDataAudibleStateChanged(bool aAudible) {
  MOZ_ASSERT(!mIsOffline);

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p NotifyDataAudibleStateChanged, audible=%d", this,
      aAudible);

  if (mDurationBeforeFirstTimeAudible.IsZero()) {
    MOZ_ASSERT(aAudible);
    mDurationBeforeFirstTimeAudible = TimeStamp::Now() - mCreatedTime;
    Telemetry::Accumulate(Telemetry::WEB_AUDIO_BECOMES_AUDIBLE_TIME,
                          mDurationBeforeFirstTimeAudible.ToSeconds());
  }

  mIsDataAudible = aAudible;
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eDataAudibleChanged);
}

void AudioDestinationNode::UpdateFinalAudibleStateIfNeeded(
    AudibleChangedReasons aReason) {
  // The audio context has been closed and we've destroyed the agent.
  if (!mAudioChannelAgent) {
    return;
  }
  const bool newAudibleState = IsAudible();
  if (mFinalAudibleState == newAudibleState) {
    return;
  }
  AUDIO_CHANNEL_LOG("AudioDestinationNode %p Final audible state=%d", this,
                    newAudibleState);
  mFinalAudibleState = newAudibleState;
  AudibleState state =
      mFinalAudibleState ? AudibleState::eAudible : AudibleState::eNotAudible;
  mAudioChannelAgent->NotifyStartedAudible(state, aReason);
  if (mFinalAudibleState) {
    CreateAudioWakeLockIfNeeded();
  } else {
    ReleaseAudioWakeLockIfExists();
  }
}

bool AudioDestinationNode::IsAudible() const {
  // The destination node is regarded as audible only if all of the following
  // conditions are true:
  // (1) data audible state : both the audio input and output are audible
  // (2) window audible state : the tab isn't muted by the tab sound indicator
  // (3) audio context state : the audio context is running
  return Context()->State() == AudioContextState::Running && mIsDataAudible &&
         mAudioChannelVolume != 0.0;
}

}  // namespace mozilla::dom