1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "AudioContext.h"
8 
9 #include "blink/PeriodicWave.h"
10 
11 #include "mozilla/ErrorResult.h"
12 #include "mozilla/NotNull.h"
13 #include "mozilla/OwningNonNull.h"
14 #include "mozilla/RefPtr.h"
15 #include "mozilla/Preferences.h"
16 #include "mozilla/StaticPrefs_media.h"
17 
18 #include "mozilla/dom/AnalyserNode.h"
19 #include "mozilla/dom/AnalyserNodeBinding.h"
20 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
21 #include "mozilla/dom/AudioContextBinding.h"
22 #include "mozilla/dom/BaseAudioContextBinding.h"
23 #include "mozilla/dom/BiquadFilterNodeBinding.h"
24 #include "mozilla/dom/BrowsingContext.h"
25 #include "mozilla/dom/CanonicalBrowsingContext.h"
26 #include "mozilla/dom/ChannelMergerNodeBinding.h"
27 #include "mozilla/dom/ChannelSplitterNodeBinding.h"
28 #include "mozilla/dom/ContentChild.h"
29 #include "mozilla/dom/ConvolverNodeBinding.h"
30 #include "mozilla/dom/DelayNodeBinding.h"
31 #include "mozilla/dom/DynamicsCompressorNodeBinding.h"
32 #include "mozilla/dom/GainNodeBinding.h"
33 #include "mozilla/dom/IIRFilterNodeBinding.h"
34 #include "mozilla/dom/HTMLMediaElement.h"
35 #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
36 #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
37 #include "mozilla/dom/MediaStreamTrackAudioSourceNodeBinding.h"
38 #include "mozilla/dom/OfflineAudioContextBinding.h"
39 #include "mozilla/dom/OscillatorNodeBinding.h"
40 #include "mozilla/dom/PannerNodeBinding.h"
41 #include "mozilla/dom/PeriodicWaveBinding.h"
42 #include "mozilla/dom/Performance.h"
43 #include "mozilla/dom/Promise.h"
44 #include "mozilla/dom/StereoPannerNodeBinding.h"
45 #include "mozilla/dom/WaveShaperNodeBinding.h"
46 #include "mozilla/dom/Worklet.h"
47 
48 #include "AudioBuffer.h"
49 #include "AudioBufferSourceNode.h"
50 #include "AudioChannelService.h"
51 #include "AudioDestinationNode.h"
52 #include "AudioListener.h"
53 #include "AudioNodeTrack.h"
54 #include "AudioStream.h"
55 #include "AudioWorkletImpl.h"
56 #include "AutoplayPolicy.h"
57 #include "BiquadFilterNode.h"
58 #include "ChannelMergerNode.h"
59 #include "ChannelSplitterNode.h"
60 #include "ConstantSourceNode.h"
61 #include "ConvolverNode.h"
62 #include "DelayNode.h"
63 #include "DynamicsCompressorNode.h"
64 #include "GainNode.h"
65 #include "IIRFilterNode.h"
66 #include "js/ArrayBuffer.h"  // JS::StealArrayBufferContents
67 #include "MediaElementAudioSourceNode.h"
68 #include "MediaStreamAudioDestinationNode.h"
69 #include "MediaStreamAudioSourceNode.h"
70 #include "MediaTrackGraph.h"
71 #include "MediaStreamTrackAudioSourceNode.h"
72 #include "nsContentUtils.h"
73 #include "nsIScriptError.h"
74 #include "nsNetCID.h"
75 #include "nsNetUtil.h"
76 #include "nsPIDOMWindow.h"
77 #include "nsPrintfCString.h"
78 #include "nsRFPService.h"
79 #include "OscillatorNode.h"
80 #include "PannerNode.h"
81 #include "PeriodicWave.h"
82 #include "ScriptProcessorNode.h"
83 #include "StereoPannerNode.h"
84 #include "WaveShaperNode.h"
85 #include "Tracing.h"
86 
87 extern mozilla::LazyLogModule gAutoplayPermissionLog;
88 
89 #define AUTOPLAY_LOG(msg, ...) \
90   MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__))
91 
92 namespace mozilla::dom {
93 
94 // 0 is a special value that MediaTracks use to denote they are not part of
95 // an AudioContext.
96 static dom::AudioContext::AudioContextId gAudioContextId = 1;
97 
98 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
99 
100 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
101   // The destination node and AudioContext form a cycle and so the destination
102   // track will be destroyed.  mWorklet must be shut down before the track
103   // is destroyed.  Do this before clearing mWorklet.
104   tmp->ShutdownWorklet();
105   NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
106   NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
107   NS_IMPL_CYCLE_COLLECTION_UNLINK(mWorklet)
108   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
109   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
110   if (tmp->mSuspendCalled || !tmp->mIsStarted) {
111     NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
112   }
113   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
114   // explicitly. mAllNodes is an array of weak pointers, ignore it here.
115   // mBasicWaveFormCache cannot participate in cycles, ignore it here.
116 
117   // Remove weak reference on the global window as the context is not usable
118   // without mDestination.
119   tmp->DisconnectFromWindow();
120 NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)
121 
122 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
123                                                   DOMEventTargetHelper)
124   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
125   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
126   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mWorklet)
127   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
128   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
129   if (tmp->mSuspendCalled || !tmp->mIsStarted) {
130     NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
131   }
132   // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
133   // explicitly. mAllNodes is an array of weak pointers, ignore it here.
134   // mBasicWaveFormCache cannot participate in cycles, ignore it here.
135 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
136 
137 NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
138 NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)
139 
140 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext)
141   NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
142 NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
143 
144 static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) {
145   if (aIsOffline || aSampleRate != 0.0) {
146     return aSampleRate;
147   } else {
148     return static_cast<float>(CubebUtils::PreferredSampleRate());
149   }
150 }
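
// A minimal usage sketch of the helper above (illustration only; it assumes
// MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE is the 0.0 "unspecified" value,
// which is what the aSampleRate != 0.0 check above implies):
//
//   GetSampleRateForAudioContext(/* aIsOffline */ true, 44100.0f);
//       // -> 44100: offline contexts always keep the requested rate.
//   GetSampleRateForAudioContext(/* aIsOffline */ false, 0.0f);
//       // -> CubebUtils::PreferredSampleRate(), the device-preferred rate.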
151 
152 AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, bool aIsOffline,
153                            uint32_t aNumberOfChannels, uint32_t aLength,
154                            float aSampleRate)
155     : DOMEventTargetHelper(aWindow),
156       mId(gAudioContextId++),
157       mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)),
158       mAudioContextState(AudioContextState::Suspended),
159       mNumberOfChannels(aNumberOfChannels),
160       mIsOffline(aIsOffline),
161       mIsStarted(!aIsOffline),
162       mIsShutDown(false),
163       mCloseCalled(false),
164       // Realtime contexts start with suspended tracks until an
165       // AudioCallbackDriver is running.
166       mSuspendCalled(!aIsOffline),
167       mIsDisconnecting(false),
168       mWasAllowedToStart(true),
169       mSuspendedByContent(false),
170       mWasEverAllowedToStart(false),
171       mWasEverBlockedToStart(false),
172       mWouldBeAllowedToStart(true) {
173   bool mute = aWindow->AddAudioContext(this);
174 
175   // Note: AudioDestinationNode needs an AudioContext that must already be
176   // bound to the window.
177   const bool allowedToStart = AutoplayPolicy::IsAllowedToPlay(*this);
178   mDestination =
179       new AudioDestinationNode(this, aIsOffline, aNumberOfChannels, aLength);
180   mDestination->Init();
181   // If an AudioContext is not allowed to start, we postpone its state
182   // transition from `suspended` to `running` until the site explicitly calls
183   // AudioContext.resume() or AudioScheduledSourceNode.start().
184   if (!allowedToStart) {
185     MOZ_ASSERT(!mIsOffline);
186     AUTOPLAY_LOG("AudioContext %p is not allowed to start", this);
187     ReportBlocked();
188   } else if (!mIsOffline) {
189     ResumeInternal(AudioContextOperationFlags::SendStateChange);
190   }
191 
192   // The context can't be muted until it has a destination.
193   if (mute) {
194     Mute();
195   }
196 
197   UpdateAutoplayAssumptionStatus();
198 
199   FFTBlock::MainThreadInit();
200 }
201 
202 void AudioContext::StartBlockedAudioContextIfAllowed() {
203   MOZ_ASSERT(NS_IsMainThread());
204   MaybeUpdateAutoplayTelemetry();
205   // Only try to start the AudioContext if it was not allowed to start before.
206   if (mWasAllowedToStart) {
207     return;
208   }
209 
210   const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
211   AUTOPLAY_LOG("Trying to start AudioContext %p, IsAllowedToPlay=%d", this,
212                isAllowedToPlay);
213 
214   // Only resume the AudioContext automatically if content has not suspended
215   // it itself; if the page called suspend(), it stays suspended until the
216   // page explicitly calls resume().
217   if (isAllowedToPlay && !mSuspendedByContent) {
218     ResumeInternal(AudioContextOperationFlags::SendStateChange);
219   } else {
220     ReportBlocked();
221   }
222 }
223 
224 void AudioContext::DisconnectFromWindow() {
225   MaybeClearPageAwakeRequest();
226   nsPIDOMWindowInner* window = GetOwner();
227   if (window) {
228     window->RemoveAudioContext(this);
229   }
230 }
231 
232 AudioContext::~AudioContext() {
233   DisconnectFromWindow();
234   UnregisterWeakMemoryReporter(this);
235   MOZ_ASSERT(!mSetPageAwakeRequest, "forgot to revoke for page awake?");
236 }
237 
238 JSObject* AudioContext::WrapObject(JSContext* aCx,
239                                    JS::Handle<JSObject*> aGivenProto) {
240   if (mIsOffline) {
241     return OfflineAudioContext_Binding::Wrap(aCx, this, aGivenProto);
242   } else {
243     return AudioContext_Binding::Wrap(aCx, this, aGivenProto);
244   }
245 }
246 
247 static bool CheckFullyActive(nsPIDOMWindowInner* aWindow, ErrorResult& aRv) {
248   if (!aWindow->IsFullyActive()) {
249     aRv.ThrowInvalidStateError("The document is not fully active.");
250     return false;
251   }
252   return true;
253 }
254 
255 /* static */
256 already_AddRefed<AudioContext> AudioContext::Constructor(
257     const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
258     ErrorResult& aRv) {
259   nsCOMPtr<nsPIDOMWindowInner> window =
260       do_QueryInterface(aGlobal.GetAsSupports());
261   if (!window) {
262     aRv.Throw(NS_ERROR_FAILURE);
263     return nullptr;
264   }
265   /**
266    * If the current settings object’s responsible document is NOT fully
267    * active, throw an InvalidStateError and abort these steps.
268    */
269   if (!CheckFullyActive(window, aRv)) {
270     return nullptr;
271   }
272 
273   if (aOptions.mSampleRate.WasPassed() &&
274       (aOptions.mSampleRate.Value() < WebAudioUtils::MinSampleRate ||
275        aOptions.mSampleRate.Value() > WebAudioUtils::MaxSampleRate)) {
276     aRv.ThrowNotSupportedError(nsPrintfCString(
277         "Sample rate %g is not in the range [%u, %u]",
278         aOptions.mSampleRate.Value(), WebAudioUtils::MinSampleRate,
279         WebAudioUtils::MaxSampleRate));
280     return nullptr;
281   }
282   float sampleRate = aOptions.mSampleRate.WasPassed()
283                          ? aOptions.mSampleRate.Value()
284                          : MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE;
285 
286   RefPtr<AudioContext> object =
287       new AudioContext(window, false, 2, 0, sampleRate);
288 
289   RegisterWeakMemoryReporter(object);
290 
291   return object.forget();
292 }
293 
294 /* static */
295 already_AddRefed<AudioContext> AudioContext::Constructor(
296     const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
297     ErrorResult& aRv) {
298   return Constructor(aGlobal, aOptions.mNumberOfChannels, aOptions.mLength,
299                      aOptions.mSampleRate, aRv);
300 }
301 
302 /* static */
303 already_AddRefed<AudioContext> AudioContext::Constructor(
304     const GlobalObject& aGlobal, uint32_t aNumberOfChannels, uint32_t aLength,
305     float aSampleRate, ErrorResult& aRv) {
306   nsCOMPtr<nsPIDOMWindowInner> window =
307       do_QueryInterface(aGlobal.GetAsSupports());
308   if (!window) {
309     aRv.Throw(NS_ERROR_FAILURE);
310     return nullptr;
311   }
312   /**
313    * If the current settings object’s responsible document is NOT fully
314    * active, throw an InvalidStateError and abort these steps.
315    */
316   if (!CheckFullyActive(window, aRv)) {
317     return nullptr;
318   }
319 
320   if (aNumberOfChannels == 0 ||
321       aNumberOfChannels > WebAudioUtils::MaxChannelCount) {
322     aRv.ThrowNotSupportedError(
323         nsPrintfCString("%u is not a valid channel count", aNumberOfChannels));
324     return nullptr;
325   }
326 
327   if (aLength == 0) {
328     aRv.ThrowNotSupportedError("Length must be nonzero");
329     return nullptr;
330   }
331 
332   if (aSampleRate < WebAudioUtils::MinSampleRate ||
333       aSampleRate > WebAudioUtils::MaxSampleRate) {
334     // The DOM binding protects us against infinity and NaN
335     aRv.ThrowNotSupportedError(nsPrintfCString(
336         "Sample rate %g is not in the range [%u, %u]", aSampleRate,
337         WebAudioUtils::MinSampleRate, WebAudioUtils::MaxSampleRate));
338     return nullptr;
339   }
340 
341   RefPtr<AudioContext> object =
342       new AudioContext(window, true, aNumberOfChannels, aLength, aSampleRate);
343 
344   RegisterWeakMemoryReporter(object);
345 
346   return object.forget();
347 }
348 
349 already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource() {
350   return AudioBufferSourceNode::Create(nullptr, *this,
351                                        AudioBufferSourceOptions());
352 }
353 
354 already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource() {
355   RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
356   return constantSourceNode.forget();
357 }
358 
359 already_AddRefed<AudioBuffer> AudioContext::CreateBuffer(
360     uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate,
361     ErrorResult& aRv) {
362   if (!aNumberOfChannels) {
363     aRv.ThrowNotSupportedError("Number of channels must be nonzero");
364     return nullptr;
365   }
366 
367   return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength,
368                              aSampleRate, aRv);
369 }
370 
371 namespace {
372 
373 bool IsValidBufferSize(uint32_t aBufferSize) {
374   switch (aBufferSize) {
375     case 0:  // let the implementation choose the buffer size
376     case 256:
377     case 512:
378     case 1024:
379     case 2048:
380     case 4096:
381     case 8192:
382     case 16384:
383       return true;
384     default:
385       return false;
386   }
387 }
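
// An equivalent form of the check above, shown only as an illustrative sketch:
// the accepted values are exactly 0 and the powers of two from 256 to 16384,
// the bufferSize list from the Web Audio spec for createScriptProcessor().
//
//   bool IsValidBufferSizeAlt(uint32_t aBufferSize) {
//     return aBufferSize == 0 ||
//            (aBufferSize >= 256 && aBufferSize <= 16384 &&
//             (aBufferSize & (aBufferSize - 1)) == 0);
//   }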
388 
389 }  // namespace
390 
391 already_AddRefed<MediaStreamAudioDestinationNode>
392 AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) {
393   return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(),
394                                                  aRv);
395 }
396 
397 already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
398     uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
399     uint32_t aNumberOfOutputChannels, ErrorResult& aRv) {
400   if (aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) {
401     aRv.ThrowIndexSizeError(
402         "At least one of numberOfInputChannels and numberOfOutputChannels must "
403         "be nonzero");
404     return nullptr;
405   }
406 
407   if (aNumberOfInputChannels > WebAudioUtils::MaxChannelCount) {
408     aRv.ThrowIndexSizeError(nsPrintfCString(
409         "%u is not a valid number of input channels", aNumberOfInputChannels));
410     return nullptr;
411   }
412 
413   if (aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount) {
414     aRv.ThrowIndexSizeError(
415         nsPrintfCString("%u is not a valid number of output channels",
416                         aNumberOfOutputChannels));
417     return nullptr;
418   }
419 
420   if (!IsValidBufferSize(aBufferSize)) {
421     aRv.ThrowIndexSizeError(
422         nsPrintfCString("%u is not a valid bufferSize", aBufferSize));
423     return nullptr;
424   }
425 
426   RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
427       this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
428   return scriptProcessor.forget();
429 }
430 
431 already_AddRefed<AnalyserNode> AudioContext::CreateAnalyser(ErrorResult& aRv) {
432   return AnalyserNode::Create(*this, AnalyserOptions(), aRv);
433 }
434 
435 already_AddRefed<StereoPannerNode> AudioContext::CreateStereoPanner(
436     ErrorResult& aRv) {
437   return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv);
438 }
439 
440 already_AddRefed<MediaElementAudioSourceNode>
441 AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
442                                        ErrorResult& aRv) {
443   MediaElementAudioSourceOptions options;
444   options.mMediaElement = aMediaElement;
445 
446   return MediaElementAudioSourceNode::Create(*this, options, aRv);
447 }
448 
449 already_AddRefed<MediaStreamAudioSourceNode>
450 AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
451                                       ErrorResult& aRv) {
452   MediaStreamAudioSourceOptions options;
453   options.mMediaStream = aMediaStream;
454 
455   return MediaStreamAudioSourceNode::Create(*this, options, aRv);
456 }
457 
458 already_AddRefed<MediaStreamTrackAudioSourceNode>
459 AudioContext::CreateMediaStreamTrackSource(MediaStreamTrack& aMediaStreamTrack,
460                                            ErrorResult& aRv) {
461   MediaStreamTrackAudioSourceOptions options;
462   options.mMediaStreamTrack = aMediaStreamTrack;
463 
464   return MediaStreamTrackAudioSourceNode::Create(*this, options, aRv);
465 }
466 
467 already_AddRefed<GainNode> AudioContext::CreateGain(ErrorResult& aRv) {
468   return GainNode::Create(*this, GainOptions(), aRv);
469 }
470 
471 already_AddRefed<WaveShaperNode> AudioContext::CreateWaveShaper(
472     ErrorResult& aRv) {
473   return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv);
474 }
475 
476 already_AddRefed<DelayNode> AudioContext::CreateDelay(double aMaxDelayTime,
477                                                       ErrorResult& aRv) {
478   DelayOptions options;
479   options.mMaxDelayTime = aMaxDelayTime;
480   return DelayNode::Create(*this, options, aRv);
481 }
482 
483 already_AddRefed<PannerNode> AudioContext::CreatePanner(ErrorResult& aRv) {
484   return PannerNode::Create(*this, PannerOptions(), aRv);
485 }
486 
487 already_AddRefed<ConvolverNode> AudioContext::CreateConvolver(
488     ErrorResult& aRv) {
489   return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv);
490 }
491 
492 already_AddRefed<ChannelSplitterNode> AudioContext::CreateChannelSplitter(
493     uint32_t aNumberOfOutputs, ErrorResult& aRv) {
494   ChannelSplitterOptions options;
495   options.mNumberOfOutputs = aNumberOfOutputs;
496   return ChannelSplitterNode::Create(*this, options, aRv);
497 }
498 
499 already_AddRefed<ChannelMergerNode> AudioContext::CreateChannelMerger(
500     uint32_t aNumberOfInputs, ErrorResult& aRv) {
501   ChannelMergerOptions options;
502   options.mNumberOfInputs = aNumberOfInputs;
503   return ChannelMergerNode::Create(*this, options, aRv);
504 }
505 
506 already_AddRefed<DynamicsCompressorNode> AudioContext::CreateDynamicsCompressor(
507     ErrorResult& aRv) {
508   return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(),
509                                         aRv);
510 }
511 
512 already_AddRefed<BiquadFilterNode> AudioContext::CreateBiquadFilter(
513     ErrorResult& aRv) {
514   return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv);
515 }
516 
517 already_AddRefed<IIRFilterNode> AudioContext::CreateIIRFilter(
518     const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
519     mozilla::ErrorResult& aRv) {
520   IIRFilterOptions options;
521   options.mFeedforward = aFeedforward;
522   options.mFeedback = aFeedback;
523   return IIRFilterNode::Create(*this, options, aRv);
524 }
525 
526 already_AddRefed<OscillatorNode> AudioContext::CreateOscillator(
527     ErrorResult& aRv) {
528   return OscillatorNode::Create(*this, OscillatorOptions(), aRv);
529 }
530 
531 already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(
532     const Sequence<float>& aRealData, const Sequence<float>& aImagData,
533     const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv) {
534   RefPtr<PeriodicWave> periodicWave = new PeriodicWave(
535       this, aRealData.Elements(), aRealData.Length(), aImagData.Elements(),
536       aImagData.Length(), aConstraints.mDisableNormalization, aRv);
537   if (aRv.Failed()) {
538     return nullptr;
539   }
540   return periodicWave.forget();
541 }
542 
543 AudioListener* AudioContext::Listener() {
544   if (!mListener) {
545     mListener = new AudioListener(this);
546   }
547   return mListener;
548 }
549 
550 double AudioContext::OutputLatency() {
551   if (mIsShutDown) {
552     return 0.0;
553   }
554   // When resistFingerprinting is enabled, return a latency figure that is
555   // fixed, but plausible for the platform.
556   double latency_s = 0.0;
557   if (StaticPrefs::privacy_resistFingerprinting()) {
558 #ifdef XP_MACOSX
559     latency_s = 512. / mSampleRate;
560 #elif MOZ_WIDGET_ANDROID
561     latency_s = 0.020;
562 #elif XP_WIN
563     latency_s = 0.04;
564 #else  // Catchall for other OSes, including Linux.
565     latency_s = 0.025;
566 #endif
567   } else {
568     return Graph()->AudioOutputLatency();
569   }
570   return latency_s;
571 }
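
// Worked example for the fixed figures above (illustrative numbers only): on
// macOS the reported latency is 512 frames at the context rate, e.g.
// 512 / 44100 ~= 11.6 ms, while Android, Windows and the catch-all case report
// a flat 20 ms, 40 ms and 25 ms respectively. Without fingerprinting
// resistance the real figure from Graph()->AudioOutputLatency() is returned.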
572 
573 void AudioContext::GetOutputTimestamp(AudioTimestamp& aTimeStamp) {
574   if (!Destination()) {
575     aTimeStamp.mContextTime.Construct(0.0);
576     aTimeStamp.mPerformanceTime.Construct(0.0);
577     return;
578   }
579 
580   // The currentTime currently being output is the currentTime minus the audio
581   // output latency. The resolution of CurrentTime() is already reduced.
582   aTimeStamp.mContextTime.Construct(
583       std::max(0.0, CurrentTime() - OutputLatency()));
584   nsPIDOMWindowInner* parent = GetParentObject();
585   Performance* perf = parent ? parent->GetPerformance() : nullptr;
586   if (perf) {
587     // perf->Now() already has reduced resolution here, no need to do it again.
588     aTimeStamp.mPerformanceTime.Construct(
589         std::max(0., perf->Now() - (OutputLatency() * 1000.)));
590   } else {
591     aTimeStamp.mPerformanceTime.Construct(0.0);
592   }
593 }
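
// Worked example of the mapping above (illustrative numbers only): with
// CurrentTime() = 1.500 s and OutputLatency() = 0.040 s, contextTime is
// 1.460 s; if perf->Now() is 2000 ms, performanceTime is 2000 - 40 = 1960 ms.
// Both values describe the same moment: the audio currently leaving the
// device.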
594 
595 Worklet* AudioContext::GetAudioWorklet(ErrorResult& aRv) {
596   if (!mWorklet) {
597     mWorklet = AudioWorkletImpl::CreateWorklet(this, aRv);
598   }
599 
600   return mWorklet;
601 }
602 bool AudioContext::IsRunning() const {
603   return mAudioContextState == AudioContextState::Running;
604 }
605 
606 already_AddRefed<Promise> AudioContext::CreatePromise(ErrorResult& aRv) {
607   // Get the relevant global for the promise from the wrapper cache because
608   // DOMEventTargetHelper::GetOwner() returns null if the document is unloaded.
609   // We know the wrapper exists because it is being used for |this| from JS.
610   // See https://github.com/heycam/webidl/issues/932 for why the relevant
611   // global is used instead of the current global.
612   nsCOMPtr<nsIGlobalObject> global = xpc::NativeGlobal(GetWrapper());
613   RefPtr<Promise> promise = Promise::Create(global, aRv);
614   if (aRv.Failed()) {
615     return nullptr;
616   }
617   /**
618    * If this's relevant global object's associated Document is not fully
619    * active then return a promise rejected with "InvalidStateError"
620    * DOMException.
621    */
622   nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(global);
623   if (!window->IsFullyActive()) {
624     promise->MaybeRejectWithInvalidStateError(
625         "The document is not fully active.");
626   }
627   return promise.forget();
628 }
629 
630 already_AddRefed<Promise> AudioContext::DecodeAudioData(
631     const ArrayBuffer& aBuffer,
632     const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
633     const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
634     ErrorResult& aRv) {
635   AutoJSAPI jsapi;
636   jsapi.Init();
637   JSContext* cx = jsapi.cx();
638 
639   // CheckedUnwrapStatic is OK, since we know we have an ArrayBuffer.
640   JS::Rooted<JSObject*> obj(cx, js::CheckedUnwrapStatic(aBuffer.Obj()));
641   if (!obj) {
642     aRv.ThrowSecurityError("Can't get audio data from cross-origin object");
643     return nullptr;
644   }
645 
646   RefPtr<Promise> promise = CreatePromise(aRv);
647   if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
648     return promise.forget();
649   }
650 
651   JSAutoRealm ar(cx, obj);
652   aBuffer.ComputeState();
653 
654   if (!aBuffer.Data()) {
655     // Throw if the buffer is detached
656     aRv.ThrowTypeError("Buffer argument can't be a detached buffer");
657     return nullptr;
658   }
659 
660   // Detach the array buffer
661   size_t length = aBuffer.Length();
662 
663   uint8_t* data = static_cast<uint8_t*>(JS::StealArrayBufferContents(cx, obj));
664 
665   // Sniff the content of the media.
666   // Failed type sniffing will be handled by AsyncDecodeWebAudio.
667   nsAutoCString contentType;
668   NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType);
669 
670   RefPtr<DecodeErrorCallback> failureCallback;
671   RefPtr<DecodeSuccessCallback> successCallback;
672   if (aFailureCallback.WasPassed()) {
673     failureCallback = &aFailureCallback.Value();
674   }
675   if (aSuccessCallback.WasPassed()) {
676     successCallback = &aSuccessCallback.Value();
677   }
678   UniquePtr<WebAudioDecodeJob> job(
679       new WebAudioDecodeJob(this, promise, successCallback, failureCallback));
680   AsyncDecodeWebAudio(contentType.get(), data, length, *job);
681   // Transfer the ownership to mDecodeJobs
682   mDecodeJobs.AppendElement(std::move(job));
683 
684   return promise.forget();
685 }
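
// Note on the StealArrayBufferContents() call above: it detaches the caller's
// ArrayBuffer, so after decodeAudioData() returns, script observes the buffer
// with a byteLength of 0, as required by the Web Audio spec.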
686 
687 void AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) {
688   // Since UniquePtr doesn't provide an operator== which allows you to compare
689   // against raw pointers, we need to iterate manually.
690   for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
691     if (mDecodeJobs[i].get() == aDecodeJob) {
692       mDecodeJobs.RemoveElementAt(i);
693       break;
694     }
695   }
696 }
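
// A possible alternative sketch (illustration only, assuming
// nsTArray::RemoveElementsBy is usable with this element type):
//
//   mDecodeJobs.RemoveElementsBy(
//       [aDecodeJob](const UniquePtr<WebAudioDecodeJob>& aJob) {
//         return aJob.get() == aDecodeJob;
//       });
//
// The manual loop above is equivalent here because a given job pointer can
// appear in mDecodeJobs at most once.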
697 
698 void AudioContext::RegisterActiveNode(AudioNode* aNode) {
699   if (!mCloseCalled) {
700     mActiveNodes.Insert(aNode);
701   }
702 }
703 
704 void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
705   mActiveNodes.Remove(aNode);
706 }
707 
708 uint32_t AudioContext::MaxChannelCount() const {
709   if (StaticPrefs::privacy_resistFingerprinting()) {
710     return 2;
711   }
712   return std::min<uint32_t>(
713       WebAudioUtils::MaxChannelCount,
714       mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
715 }
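
// Illustrative outcomes of the logic above: an offline context constructed
// with 6 channels reports 6 (capped at WebAudioUtils::MaxChannelCount), a
// realtime context reports the output device's channel count from CubebUtils,
// and with resistFingerprinting enabled every context reports 2.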
716 
717 uint32_t AudioContext::ActiveNodeCount() const { return mActiveNodes.Count(); }
718 
719 MediaTrackGraph* AudioContext::Graph() const {
720   return Destination()->Track()->Graph();
721 }
722 
723 AudioNodeTrack* AudioContext::DestinationTrack() const {
724   if (Destination()) {
725     return Destination()->Track();
726   }
727   return nullptr;
728 }
729 
730 void AudioContext::ShutdownWorklet() {
731   if (mWorklet) {
732     mWorklet->Impl()->NotifyWorkletFinished();
733   }
734 }
735 
736 double AudioContext::CurrentTime() {
737   mozilla::MediaTrack* track = Destination()->Track();
738 
739   double rawTime = track->TrackTimeToSeconds(track->GetCurrentTime());
740 
741   // CurrentTime increments in intervals of 128/sampleRate. If the timer
742   // precision reduction is smaller than this interval, the jittered time
743   // could always be snapped back to the raw interval step anyway, so we
744   // simply return the un-reduced time (which also avoids breaking tests).
745   // Both quantities are converted to a common unit, milliseconds, to compare.
746   if ((128 / mSampleRate) * 1000.0 > nsRFPService::TimerResolution() / 1000.0) {
747     return rawTime;
748   }
749 
750   MOZ_ASSERT(GetParentObject()->AsGlobal());
751   // The value of a MediaTrack's CurrentTime will always advance forward; it
752   // will never reset (even if one rewinds a video.) Therefore we can use a
753   // single Random Seed initialized at the same time as the object.
754   return nsRFPService::ReduceTimePrecisionAsSecs(
755       rawTime, GetRandomTimelineSeed(),
756       /* aIsSystemPrincipal */ false,
757       GetParentObject()->AsGlobal()->CrossOriginIsolated());
758 }
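
// Worked example for the precision guard above (illustrative numbers only,
// assuming nsRFPService::TimerResolution() is expressed in microseconds, hence
// the division by 1000): at mSampleRate = 48000 a render quantum lasts
// 128 / 48000 s ~= 2.67 ms, so with a 1 ms timer precision (2.67 > 1) the raw
// time is returned, while a 100 ms precision (2.67 < 100) goes through
// ReduceTimePrecisionAsSecs().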
759 
760 nsISerialEventTarget* AudioContext::GetMainThread() const {
761   if (nsPIDOMWindowInner* window = GetParentObject()) {
762     return window->AsGlobal()->EventTargetFor(TaskCategory::Other);
763   }
764 
765   return GetCurrentSerialEventTarget();
766 }
767 
768 void AudioContext::DisconnectFromOwner() {
769   mIsDisconnecting = true;
770   MaybeClearPageAwakeRequest();
771   OnWindowDestroy();
772   DOMEventTargetHelper::DisconnectFromOwner();
773 }
774 
775 void AudioContext::OnWindowDestroy() {
776   // Avoid resending the telemetry data.
777   if (!mIsShutDown) {
778     MaybeUpdateAutoplayTelemetryWhenShutdown();
779   }
780   mIsShutDown = true;
781 
782   CloseInternal(nullptr, AudioContextOperationFlags::None);
783 
784   // We don't want to touch promises if the global is going away soon.
785   if (!mIsDisconnecting) {
786     for (auto p : mPromiseGripArray) {
787       p->MaybeRejectWithInvalidStateError("Navigated away from page");
788     }
789 
790     mPromiseGripArray.Clear();
791 
792     for (const auto& p : mPendingResumePromises) {
793       p->MaybeRejectWithInvalidStateError("Navigated away from page");
794     }
795     mPendingResumePromises.Clear();
796   }
797 
798   // On process shutdown, the MTG thread shuts down before the destination
799   // track is destroyed, but AudioWorklet needs to release objects on the MTG
800   // thread.  AudioContext::Shutdown() is invoked on processing the
801   // PBrowser::Destroy() message before xpcom shutdown begins.
802   ShutdownWorklet();
803 
804   if (mDestination) {
805     // We can destroy the MediaTrackGraph at this point.
806     // Although there may be other clients using the graph, this graph is used
807     // only for clients in the same window and this window is going away.
808     // This will also interrupt any worklet script still running on the graph
809     // thread.
810     Graph()->ForceShutDown();
811     // AudioDestinationNodes on rendering offline contexts have a
812     // self-reference which needs removal.
813     if (mIsOffline) {
814       mDestination->OfflineShutdown();
815     }
816   }
817 }
818 
819 /* This runnable fires the "statechange" event. */
820 class OnStateChangeTask final : public Runnable {
821  public:
822   explicit OnStateChangeTask(AudioContext* aAudioContext)
823       : Runnable("dom::OnStateChangeTask"), mAudioContext(aAudioContext) {}
824 
825   NS_IMETHODIMP
826   Run() override {
827     nsPIDOMWindowInner* parent = mAudioContext->GetParentObject();
828     if (!parent) {
829       return NS_ERROR_FAILURE;
830     }
831 
832     Document* doc = parent->GetExtantDoc();
833     if (!doc) {
834       return NS_ERROR_FAILURE;
835     }
836 
837     return nsContentUtils::DispatchTrustedEvent(
838         doc, static_cast<DOMEventTargetHelper*>(mAudioContext),
839         u"statechange"_ns, CanBubble::eNo, Cancelable::eNo);
840   }
841 
842  private:
843   RefPtr<AudioContext> mAudioContext;
844 };
845 
846 void AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
847   MOZ_ASSERT(NS_IsMainThread());
848   nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
849   // It can happen that this runnable takes a long time to reach the main
850   // thread, and that the global is no longer valid by then.
851   if (parentObject) {
852     parentObject->AbstractMainThreadFor(TaskCategory::Other)
853         ->Dispatch(std::move(aRunnable));
854   } else {
855     RefPtr<nsIRunnable> runnable(aRunnable);
856     runnable = nullptr;
857   }
858 }
859 
860 void AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState) {
861   MOZ_ASSERT(NS_IsMainThread());
862 
863   if (mAudioContextState == AudioContextState::Closed) {
864     fprintf(stderr,
865             "Invalid transition: mAudioContextState: %d -> aNewState %d\n",
866             static_cast<int>(mAudioContextState), static_cast<int>(aNewState));
867     MOZ_ASSERT(false);
868   }
869 
870   if (aPromise) {
871     Promise* promise = reinterpret_cast<Promise*>(aPromise);
872     // It is possible for the promise to have been removed from
873     // mPromiseGripArray if the cycle collector has severed our connections. DO
874     // NOT dereference the promise pointer in that case since it may point to
875     // already freed memory.
876     if (mPromiseGripArray.Contains(promise)) {
877       promise->MaybeResolveWithUndefined();
878       DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
879       MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
880     }
881   }
882 
883   // Resolve all pending promises once the audio context has been allowed to
884   // start.
885   if (aNewState == AudioContextState::Running) {
886     for (const auto& p : mPendingResumePromises) {
887       p->MaybeResolveWithUndefined();
888     }
889     mPendingResumePromises.Clear();
890   }
891 
892   if (mAudioContextState != aNewState) {
893     RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
894     Dispatch(task.forget());
895   }
896 
897   mAudioContextState = aNewState;
898   Destination()->NotifyAudioContextStateChanged();
899   MaybeUpdatePageAwakeRequest();
900 }
901 
902 BrowsingContext* AudioContext::GetTopLevelBrowsingContext() {
903   nsCOMPtr<nsPIDOMWindowInner> window = GetParentObject();
904   if (!window) {
905     return nullptr;
906   }
907   BrowsingContext* bc = window->GetBrowsingContext();
908   if (!bc || bc->IsDiscarded()) {
909     return nullptr;
910   }
911   return bc->Top();
912 }
913 
914 void AudioContext::MaybeUpdatePageAwakeRequest() {
915   // No need to keep page awake for offline context.
916   if (IsOffline()) {
917     return;
918   }
919 
920   if (mIsShutDown) {
921     return;
922   }
923 
924   if (IsRunning() && !mSetPageAwakeRequest) {
925     SetPageAwakeRequest(true);
926   } else if (!IsRunning() && mSetPageAwakeRequest) {
927     SetPageAwakeRequest(false);
928   }
929 }
930 
931 void AudioContext::SetPageAwakeRequest(bool aShouldSet) {
932   mSetPageAwakeRequest = aShouldSet;
933   BrowsingContext* bc = GetTopLevelBrowsingContext();
934   if (!bc) {
935     return;
936   }
937   if (XRE_IsContentProcess()) {
938     ContentChild* contentChild = ContentChild::GetSingleton();
939     Unused << contentChild->SendAddOrRemovePageAwakeRequest(bc, aShouldSet);
940     return;
941   }
942   if (aShouldSet) {
943     bc->Canonical()->AddPageAwakeRequest();
944   } else {
945     bc->Canonical()->RemovePageAwakeRequest();
946   }
947 }
948 
949 void AudioContext::MaybeClearPageAwakeRequest() {
950   if (mSetPageAwakeRequest) {
951     SetPageAwakeRequest(false);
952   }
953 }
954 
955 nsTArray<RefPtr<mozilla::MediaTrack>> AudioContext::GetAllTracks() const {
956   nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
957   for (AudioNode* node : mAllNodes) {
958     mozilla::MediaTrack* t = node->GetTrack();
959     if (t) {
960       tracks.AppendElement(t);
961     }
962     // Add the tracks of AudioParam.
963     const nsTArray<RefPtr<AudioParam>>& audioParams = node->GetAudioParams();
964     if (!audioParams.IsEmpty()) {
965       for (auto& param : audioParams) {
966         t = param->GetTrack();
967         if (t && !tracks.Contains(t)) {
968           tracks.AppendElement(t);
969         }
970       }
971     }
972   }
973   return tracks;
974 }
975 
976 already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
977   TRACE("AudioContext::Suspend");
978   RefPtr<Promise> promise = CreatePromise(aRv);
979   if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
980     return promise.forget();
981   }
982   if (mIsOffline) {
983     // XXXbz This is not reachable, since we don't implement this
984     // method on OfflineAudioContext at all!
985     promise->MaybeRejectWithNotSupportedError(
986         "Can't suspend OfflineAudioContext yet");
987     return promise.forget();
988   }
989 
990   if (mCloseCalled) {
991     promise->MaybeRejectWithInvalidStateError(
992         "Can't suspend if the control thread state is \"closed\"");
993     return promise.forget();
994   }
995 
996   mSuspendedByContent = true;
997   mPromiseGripArray.AppendElement(promise);
998   SuspendInternal(promise, AudioContextOperationFlags::SendStateChange);
999   return promise.forget();
1000 }
1001 
1002 void AudioContext::SuspendFromChrome() {
1003   if (mIsOffline || mIsShutDown) {
1004     return;
1005   }
1006   SuspendInternal(nullptr, Preferences::GetBool("dom.audiocontext.testing")
1007                                ? AudioContextOperationFlags::SendStateChange
1008                                : AudioContextOperationFlags::None);
1009 }
1010 
1011 void AudioContext::SuspendInternal(void* aPromise,
1012                                    AudioContextOperationFlags aFlags) {
1013   MOZ_ASSERT(NS_IsMainThread());
1014   MOZ_ASSERT(!mIsOffline);
1015   Destination()->Suspend();
1016 
1017   nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
1018   // If mSuspendCalled is true then we already suspended all our tracks,
1019   // so don't suspend them again (since suspend(); suspend(); resume(); should
1020   // cancel both suspends). But we still need to do ApplyAudioContextOperation
1021   // to ensure our new promise is resolved.
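  // Call-sequence sketch of that guard (illustration only):
  //   suspend();  // tracks suspended, mSuspendCalled becomes true
  //   suspend();  // tracks left alone, but the new promise still resolves
  //   resume();   // a single resume cancels both suspends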
1022   if (!mSuspendCalled) {
1023     tracks = GetAllTracks();
1024   }
1025   auto promise = Graph()->ApplyAudioContextOperation(
1026       DestinationTrack(), std::move(tracks), AudioContextOperation::Suspend);
1027   if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
1028     promise->Then(
1029         GetMainThread(), "AudioContext::OnStateChanged",
1030         [self = RefPtr<AudioContext>(this),
1031          aPromise](AudioContextState aNewState) {
1032           self->OnStateChanged(aPromise, aNewState);
1033         },
1034         [] { MOZ_CRASH("Unexpected rejection"); });
1035   }
1036 
1037   mSuspendCalled = true;
1038 }
1039 
1040 void AudioContext::ResumeFromChrome() {
1041   if (mIsOffline || mIsShutDown) {
1042     return;
1043   }
1044   ResumeInternal(Preferences::GetBool("dom.audiocontext.testing")
1045                      ? AudioContextOperationFlags::SendStateChange
1046                      : AudioContextOperationFlags::None);
1047 }
1048 
1049 already_AddRefed<Promise> AudioContext::Resume(ErrorResult& aRv) {
1050   TRACE("AudioContext::Resume");
1051   RefPtr<Promise> promise = CreatePromise(aRv);
1052   if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
1053     return promise.forget();
1054   }
1055 
1056   if (mIsOffline) {
1057     promise->MaybeRejectWithNotSupportedError(
1058         "Can't resume OfflineAudioContext");
1059     return promise.forget();
1060   }
1061 
1062   if (mCloseCalled) {
1063     promise->MaybeRejectWithInvalidStateError(
1064         "Can't resume if the control thread state is \"closed\"");
1065     return promise.forget();
1066   }
1067 
1068   mSuspendedByContent = false;
1069   mPendingResumePromises.AppendElement(promise);
1070 
1071   const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
1072   AUTOPLAY_LOG("Trying to resume AudioContext %p, IsAllowedToPlay=%d", this,
1073                isAllowedToPlay);
1074   if (isAllowedToPlay) {
1075     ResumeInternal(AudioContextOperationFlags::SendStateChange);
1076   } else {
1077     ReportBlocked();
1078   }
1079 
1080   MaybeUpdateAutoplayTelemetry();
1081 
1082   return promise.forget();
1083 }
1084 
1085 void AudioContext::ResumeInternal(AudioContextOperationFlags aFlags) {
1086   MOZ_ASSERT(!mIsOffline);
1087   AUTOPLAY_LOG("Allow to resume AudioContext %p", this);
1088   mWasAllowedToStart = true;
1089 
1090   Destination()->Resume();
1091 
1092   nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
1093   // If mSuspendCalled is false then we already resumed all our tracks,
1094   // so don't resume them again (since suspend(); resume(); resume(); should
1095   // be OK). But we still need to do ApplyAudioContextOperation
1096   // to ensure our new promise is resolved.
1097   if (mSuspendCalled) {
1098     tracks = GetAllTracks();
1099   }
1100   auto promise = Graph()->ApplyAudioContextOperation(
1101       DestinationTrack(), std::move(tracks), AudioContextOperation::Resume);
1102   if (aFlags & AudioContextOperationFlags::SendStateChange) {
1103     promise->Then(
1104         GetMainThread(), "AudioContext::OnStateChanged",
1105         [self = RefPtr<AudioContext>(this)](AudioContextState aNewState) {
1106           self->OnStateChanged(nullptr, aNewState);
1107         },
1108         [] {});  // Promise may be rejected after graph shutdown.
1109   }
1110   mSuspendCalled = false;
1111 }
1112 
1113 void AudioContext::UpdateAutoplayAssumptionStatus() {
1114   if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
1115           *this)) {
1116     mWasEverAllowedToStart |= true;
1117     mWouldBeAllowedToStart = true;
1118   } else {
1119     mWasEverBlockedToStart |= true;
1120     mWouldBeAllowedToStart = false;
1121   }
1122 }
1123 
1124 void AudioContext::MaybeUpdateAutoplayTelemetry() {
1125   // Exclude offline AudioContext because it's always allowed to start.
1126   if (mIsOffline) {
1127     return;
1128   }
1129 
1130   if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
1131           *this) &&
1132       !mWouldBeAllowedToStart) {
1133     AccumulateCategorical(
1134         mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::AllowedAfterBlocked);
1135   }
1136   UpdateAutoplayAssumptionStatus();
1137 }
1138 
1139 void AudioContext::MaybeUpdateAutoplayTelemetryWhenShutdown() {
1140   // Exclude offline AudioContext because it's always allowed to start.
1141   if (mIsOffline) {
1142     return;
1143   }
1144 
1145   if (mWasEverAllowedToStart && !mWasEverBlockedToStart) {
1146     AccumulateCategorical(
1147         mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverBlocked);
1148   } else if (!mWasEverAllowedToStart && mWasEverBlockedToStart) {
1149     AccumulateCategorical(
1150         mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverAllowed);
1151   }
1152 }
1153 
1154 void AudioContext::ReportBlocked() {
1155   ReportToConsole(nsIScriptError::warningFlag,
1156                   "BlockAutoplayWebAudioStartError");
1157   mWasAllowedToStart = false;
1158 
1159   if (!StaticPrefs::media_autoplay_block_event_enabled()) {
1160     return;
1161   }
1162 
1163   RefPtr<AudioContext> self = this;
1164   RefPtr<nsIRunnable> r =
1165       NS_NewRunnableFunction("AudioContext::AutoplayBlocked", [self]() {
1166         nsPIDOMWindowInner* parent = self->GetParentObject();
1167         if (!parent) {
1168           return;
1169         }
1170 
1171         Document* doc = parent->GetExtantDoc();
1172         if (!doc) {
1173           return;
1174         }
1175 
1176         AUTOPLAY_LOG("Dispatch `blocked` event for AudioContext %p",
1177                      self.get());
1178         nsContentUtils::DispatchTrustedEvent(
1179             doc, static_cast<DOMEventTargetHelper*>(self), u"blocked"_ns,
1180             CanBubble::eNo, Cancelable::eNo);
1181       });
1182   Dispatch(r.forget());
1183 }
1184 
1185 already_AddRefed<Promise> AudioContext::Close(ErrorResult& aRv) {
1186   TRACE("AudioContext::Close");
1187   RefPtr<Promise> promise = CreatePromise(aRv);
1188   if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
1189     return promise.forget();
1190   }
1191 
1192   if (mIsOffline) {
1193     // XXXbz This is not reachable, since we don't implement this
1194     // method on OfflineAudioContext at all!
1195     promise->MaybeRejectWithNotSupportedError(
1196         "Can't close OfflineAudioContext yet");
1197     return promise.forget();
1198   }
1199 
1200   if (mCloseCalled) {
1201     promise->MaybeRejectWithInvalidStateError(
1202         "Can't close an AudioContext twice");
1203     return promise.forget();
1204   }
1205 
1206   mPromiseGripArray.AppendElement(promise);
1207 
1208   CloseInternal(promise, AudioContextOperationFlags::SendStateChange);
1209 
1210   return promise.forget();
1211 }
1212 
1213 void AudioContext::OfflineClose() {
1214   CloseInternal(nullptr, AudioContextOperationFlags::None);
1215 }
1216 
1217 void AudioContext::CloseInternal(void* aPromise,
1218                                  AudioContextOperationFlags aFlags) {
1219   // This can be called when freeing a document, and the tracks are dead at
1220   // this point, so we need extra null-checks.
1221   AudioNodeTrack* ds = DestinationTrack();
1222   if (ds && !mIsOffline) {
1223     Destination()->Close();
1224 
1225     nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
1226     // If mSuspendCalled or mCloseCalled are true then we already suspended
1227     // all our tracks, so don't suspend them again. But we still need to do
1228     // ApplyAudioContextOperation to ensure our new promise is resolved.
1229     if (!mSuspendCalled && !mCloseCalled) {
1230       tracks = GetAllTracks();
1231     }
1232     auto promise = Graph()->ApplyAudioContextOperation(
1233         ds, std::move(tracks), AudioContextOperation::Close);
1234     if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
1235       promise->Then(
1236           GetMainThread(), "AudioContext::OnStateChanged",
1237           [self = RefPtr<AudioContext>(this),
1238            aPromise](AudioContextState aNewState) {
1239             self->OnStateChanged(aPromise, aNewState);
1240           },
1241           [] {});  // Promise may be rejected after graph shutdown.
1242     }
1243   }
1244   mCloseCalled = true;
1245   // Release references to active nodes.
1246   // Active AudioNodes don't unregister in destructors, at which point the
1247   // Node is already unregistered.
1248   mActiveNodes.Clear();
1249 }
1250 
1251 void AudioContext::RegisterNode(AudioNode* aNode) {
1252   MOZ_ASSERT(!mAllNodes.Contains(aNode));
1253   mAllNodes.Insert(aNode);
1254 }
1255 
1256 void AudioContext::UnregisterNode(AudioNode* aNode) {
1257   MOZ_ASSERT(mAllNodes.Contains(aNode));
1258   mAllNodes.Remove(aNode);
1259 }
1260 
1261 already_AddRefed<Promise> AudioContext::StartRendering(ErrorResult& aRv) {
1262   MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
1263   RefPtr<Promise> promise = CreatePromise(aRv);
1264   if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
1265     return promise.forget();
1266   }
1267   if (mIsStarted) {
1268     aRv.ThrowInvalidStateError("Rendering already started");
1269     return nullptr;
1270   }
1271 
1272   mIsStarted = true;
1273   mDestination->StartRendering(promise);
1274 
1275   OnStateChanged(nullptr, AudioContextState::Running);
1276 
1277   return promise.forget();
1278 }
1279 
1280 unsigned long AudioContext::Length() {
1281   MOZ_ASSERT(mIsOffline);
1282   return mDestination->Length();
1283 }
1284 
1285 void AudioContext::Mute() const {
1286   MOZ_ASSERT(!mIsOffline);
1287   if (mDestination) {
1288     mDestination->Mute();
1289   }
1290 }
1291 
1292 void AudioContext::Unmute() const {
1293   MOZ_ASSERT(!mIsOffline);
1294   if (mDestination) {
1295     mDestination->Unmute();
1296   }
1297 }
1298 
1299 void AudioContext::SetParamMapForWorkletName(
1300     const nsAString& aName, AudioParamDescriptorMap* aParamMap) {
1301   MOZ_ASSERT(!mWorkletParamDescriptors.Contains(aName));
1302   Unused << mWorkletParamDescriptors.InsertOrUpdate(
1303       aName, std::move(*aParamMap), fallible);
1304 }
1305 
1306 size_t AudioContext::SizeOfIncludingThis(
1307     mozilla::MallocSizeOf aMallocSizeOf) const {
1308   // AudioNodes are tracked separately because we do not want the AudioContext
1309   // to track all of the AudioNodes it creates, so we wouldn't be able to
1310   // traverse them from here.
1311 
1312   size_t amount = aMallocSizeOf(this);
1313   if (mListener) {
1314     amount += mListener->SizeOfIncludingThis(aMallocSizeOf);
1315   }
1316   amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf);
1317   for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
1318     amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf);
1319   }
1320   amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
1321   return amount;
1322 }
1323 
1324 NS_IMETHODIMP
1325 AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
1326                              nsISupports* aData, bool aAnonymize) {
1327   const nsLiteralCString nodeDescription(
1328       "Memory used by AudioNode DOM objects (Web Audio).");
1329   for (AudioNode* node : mAllNodes) {
1330     int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
1331     nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes",
1332                                 node->NodeType());
1333     aHandleReport->Callback(""_ns, domNodePath, KIND_HEAP, UNITS_BYTES, amount,
1334                             nodeDescription, aData);
1335   }
1336 
1337   int64_t amount = SizeOfIncludingThis(MallocSizeOf);
1338   MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
1339                      amount,
1340                      "Memory used by AudioContext objects (Web Audio).");
1341 
1342   return NS_OK;
1343 }
1344 
1345 BasicWaveFormCache* AudioContext::GetBasicWaveFormCache() {
1346   MOZ_ASSERT(NS_IsMainThread());
1347   if (!mBasicWaveFormCache) {
1348     mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
1349   }
1350   return mBasicWaveFormCache;
1351 }
1352 
1353 void AudioContext::ReportToConsole(uint32_t aErrorFlags,
1354                                    const char* aMsg) const {
1355   MOZ_ASSERT(aMsg);
1356   Document* doc =
1357       GetParentObject() ? GetParentObject()->GetExtantDoc() : nullptr;
1358   nsContentUtils::ReportToConsole(aErrorFlags, "Media"_ns, doc,
1359                                   nsContentUtils::eDOM_PROPERTIES, aMsg);
1360 }
1361 
1362 BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate)
1363     : mSampleRate(aSampleRate) {
1364   MOZ_ASSERT(NS_IsMainThread());
1365 }
1366 BasicWaveFormCache::~BasicWaveFormCache() = default;
1367 
1368 WebCore::PeriodicWave* BasicWaveFormCache::GetBasicWaveForm(
1369     OscillatorType aType) {
1370   MOZ_ASSERT(!NS_IsMainThread());
1371   if (aType == OscillatorType::Sawtooth) {
1372     if (!mSawtooth) {
1373       mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate);
1374     }
1375     return mSawtooth;
1376   }
1377   if (aType == OscillatorType::Square) {
1378     if (!mSquare) {
1379       mSquare = WebCore::PeriodicWave::createSquare(mSampleRate);
1380     }
1381     return mSquare;
1382   }
1383   if (aType == OscillatorType::Triangle) {
1384     if (!mTriangle) {
1385       mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate);
1386     }
1387     return mTriangle;
1388   }
1389   MOZ_ASSERT(false, "Not reached");
1390   return nullptr;
1391 }
1392 
1393 }  // namespace mozilla::dom
1394