1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "AudioContext.h"
8
9 #include "blink/PeriodicWave.h"
10
11 #include "mozilla/ErrorResult.h"
12 #include "mozilla/NotNull.h"
13 #include "mozilla/OwningNonNull.h"
14 #include "mozilla/RefPtr.h"
15 #include "mozilla/Preferences.h"
16 #include "mozilla/StaticPrefs_media.h"
17
18 #include "mozilla/dom/AnalyserNode.h"
19 #include "mozilla/dom/AnalyserNodeBinding.h"
20 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
21 #include "mozilla/dom/AudioContextBinding.h"
22 #include "mozilla/dom/BaseAudioContextBinding.h"
23 #include "mozilla/dom/BiquadFilterNodeBinding.h"
24 #include "mozilla/dom/ChannelMergerNodeBinding.h"
25 #include "mozilla/dom/ChannelSplitterNodeBinding.h"
26 #include "mozilla/dom/ConvolverNodeBinding.h"
27 #include "mozilla/dom/DelayNodeBinding.h"
28 #include "mozilla/dom/DynamicsCompressorNodeBinding.h"
29 #include "mozilla/dom/GainNodeBinding.h"
30 #include "mozilla/dom/IIRFilterNodeBinding.h"
31 #include "mozilla/dom/HTMLMediaElement.h"
32 #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
33 #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
34 #include "mozilla/dom/MediaStreamTrackAudioSourceNodeBinding.h"
35 #include "mozilla/dom/OfflineAudioContextBinding.h"
36 #include "mozilla/dom/OscillatorNodeBinding.h"
37 #include "mozilla/dom/PannerNodeBinding.h"
38 #include "mozilla/dom/PeriodicWaveBinding.h"
39 #include "mozilla/dom/Performance.h"
40 #include "mozilla/dom/Promise.h"
41 #include "mozilla/dom/StereoPannerNodeBinding.h"
42 #include "mozilla/dom/WaveShaperNodeBinding.h"
43 #include "mozilla/dom/Worklet.h"
44
45 #include "AudioBuffer.h"
46 #include "AudioBufferSourceNode.h"
47 #include "AudioChannelService.h"
48 #include "AudioDestinationNode.h"
49 #include "AudioListener.h"
50 #include "AudioNodeTrack.h"
51 #include "AudioStream.h"
52 #include "AudioWorkletImpl.h"
53 #include "AutoplayPolicy.h"
54 #include "BiquadFilterNode.h"
55 #include "ChannelMergerNode.h"
56 #include "ChannelSplitterNode.h"
57 #include "ConstantSourceNode.h"
58 #include "ConvolverNode.h"
59 #include "DelayNode.h"
60 #include "DynamicsCompressorNode.h"
61 #include "GainNode.h"
62 #include "IIRFilterNode.h"
63 #include "js/ArrayBuffer.h" // JS::StealArrayBufferContents
64 #include "MediaElementAudioSourceNode.h"
65 #include "MediaStreamAudioDestinationNode.h"
66 #include "MediaStreamAudioSourceNode.h"
67 #include "MediaTrackGraph.h"
68 #include "MediaStreamTrackAudioSourceNode.h"
69 #include "nsContentUtils.h"
70 #include "nsIScriptError.h"
71 #include "nsNetCID.h"
72 #include "nsNetUtil.h"
73 #include "nsPIDOMWindow.h"
74 #include "nsPrintfCString.h"
75 #include "nsRFPService.h"
76 #include "OscillatorNode.h"
77 #include "PannerNode.h"
78 #include "PeriodicWave.h"
79 #include "ScriptProcessorNode.h"
80 #include "StereoPannerNode.h"
81 #include "WaveShaperNode.h"
82
// Log module shared with other autoplay-policy code; defined elsewhere.
extern mozilla::LazyLogModule gAutoplayPermissionLog;

#define AUTOPLAY_LOG(msg, ...) \
  MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__))

// Allows unqualified move() at the call sites below in this file.
using std::move;

namespace mozilla::dom {

// 0 is a special value that MediaTracks use to denote they are not part of a
// AudioContext.
// Monotonically increasing id counter; presumably only incremented on the
// main thread (in the constructor) — TODO confirm.
static dom::AudioContext::AudioContextId gAudioContextId = 1;
95
// Cycle-collection glue. AudioContext participates in the cycle collector so
// that the JS-visible object graph (destination, listener, worklet, pending
// promises) can be torn down even when it forms reference cycles with the
// window.
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
  // The destination node and AudioContext form a cycle and so the destination
  // track will be destroyed. mWorklet must be shut down before the track
  // is destroyed. Do this before clearing mWorklet.
  tmp->ShutdownWorklet();
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mWorklet)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
  // Active nodes are only released here if the context can no longer make
  // sound (suspended, or an offline context that never started rendering).
  if (tmp->mSuspendCalled || !tmp->mIsStarted) {
    NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly. mAllNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.

  // Remove weak reference on the global window as the context is not usable
  // without mDestination.
  tmp->DisconnectFromWindow();
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                  DOMEventTargetHelper)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mWorklet)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
  // Mirror the condition in the unlink implementation above.
  if (tmp->mSuspendCalled || !tmp->mIsStarted) {
    NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly. mAllNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext)
  NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
141
142 static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) {
143 if (aIsOffline || aSampleRate != 0.0) {
144 return aSampleRate;
145 } else {
146 return static_cast<float>(CubebUtils::PreferredSampleRate());
147 }
148 }
149
// Constructs either a realtime (aIsOffline == false) or offline context.
// aLength is only meaningful for offline contexts (frames to render);
// aSampleRate of 0 requests the platform's preferred rate for realtime
// contexts (see GetSampleRateForAudioContext).
AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, bool aIsOffline,
                           uint32_t aNumberOfChannels, uint32_t aLength,
                           float aSampleRate)
    : DOMEventTargetHelper(aWindow),
      mId(gAudioContextId++),
      mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)),
      // Per spec, every context starts out "suspended".
      mAudioContextState(AudioContextState::Suspended),
      mNumberOfChannels(aNumberOfChannels),
      mIsOffline(aIsOffline),
      // Offline contexts do not start until startRendering() is called.
      mIsStarted(!aIsOffline),
      mIsShutDown(false),
      mCloseCalled(false),
      // Realtime contexts start with suspended tracks until an
      // AudioCallbackDriver is running.
      mSuspendCalled(!aIsOffline),
      mIsDisconnecting(false),
      mWasAllowedToStart(true),
      mSuspendedByContent(false),
      mWasEverAllowedToStart(false),
      mWasEverBlockedToStart(false),
      mWouldBeAllowedToStart(true) {
  // Register with the window; the return value tells us whether the window
  // currently wants audio muted.
  bool mute = aWindow->AddAudioContext(this);

  // Note: AudioDestinationNode needs an AudioContext that must already be
  // bound to the window.
  const bool allowedToStart = AutoplayPolicy::IsAllowedToPlay(*this);
  mDestination =
      new AudioDestinationNode(this, aIsOffline, aNumberOfChannels, aLength);
  mDestination->Init();
  // If an AudioContext is not allowed to start, we would postpone its state
  // transition from `suspended` to `running` until sites explicitly call
  // AudioContext.resume() or AudioScheduledSourceNode.start().
  if (!allowedToStart) {
    MOZ_ASSERT(!mIsOffline);
    AUTOPLAY_LOG("AudioContext %p is not allowed to start", this);
    ReportBlocked();
  } else if (!mIsOffline) {
    ResumeInternal(AudioContextOperationFlags::SendStateChange);
  }

  // The context can't be muted until it has a destination.
  if (mute) {
    Mute();
  }

  UpdateAutoplayAssumptionStatus();

  // One-time (per process) initialization of FFT tables used by analyser /
  // convolver machinery.
  FFTBlock::MainThreadInit();
}
199
// Attempts to start a context that was previously blocked by the autoplay
// policy (e.g. after a user gesture). Main-thread only. No-op if the context
// was never blocked.
void AudioContext::StartBlockedAudioContextIfAllowed() {
  MOZ_ASSERT(NS_IsMainThread());
  MaybeUpdateAutoplayTelemetry();
  // Only try to start AudioContext when AudioContext was not allowed to start.
  if (mWasAllowedToStart) {
    return;
  }

  const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
  AUTOPLAY_LOG("Trying to start AudioContext %p, IsAllowedToPlay=%d", this,
               isAllowedToPlay);

  // Only start the AudioContext if this resume() call was initiated by content,
  // not if it was a result of the AudioContext starting after having been
  // blocked because of the auto-play policy.
  if (isAllowedToPlay && !mSuspendedByContent) {
    ResumeInternal(AudioContextOperationFlags::SendStateChange);
  } else {
    ReportBlocked();
  }
}
221
DisconnectFromWindow()222 void AudioContext::DisconnectFromWindow() {
223 nsPIDOMWindowInner* window = GetOwner();
224 if (window) {
225 window->RemoveAudioContext(this);
226 }
227 }
228
AudioContext::~AudioContext() {
  // Undo what construction set up: the window's registration of this context
  // and the weak memory-reporter registration added by the Constructor()
  // factories.
  DisconnectFromWindow();
  UnregisterWeakMemoryReporter(this);
}
233
WrapObject(JSContext * aCx,JS::Handle<JSObject * > aGivenProto)234 JSObject* AudioContext::WrapObject(JSContext* aCx,
235 JS::Handle<JSObject*> aGivenProto) {
236 if (mIsOffline) {
237 return OfflineAudioContext_Binding::Wrap(aCx, this, aGivenProto);
238 } else {
239 return AudioContext_Binding::Wrap(aCx, this, aGivenProto);
240 }
241 }
242
CheckFullyActive(nsPIDOMWindowInner * aWindow,ErrorResult & aRv)243 static bool CheckFullyActive(nsPIDOMWindowInner* aWindow, ErrorResult& aRv) {
244 if (!aWindow->IsFullyActive()) {
245 aRv.ThrowInvalidStateError("The document is not fully active.");
246 return false;
247 }
248 return true;
249 }
250
251 /* static */
/* static */
// WebIDL constructor for realtime AudioContext. Validates the requested
// sample rate (if any), then creates a stereo realtime context.
already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
    ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }
  /**
   * If the current settings object’s responsible document is NOT fully
   * active, throw an InvalidStateError and abort these steps.
   */
  if (!CheckFullyActive(window, aRv)) {
    return nullptr;
  }

  // An explicitly passed sample rate must lie within the supported range.
  if (aOptions.mSampleRate.WasPassed() &&
      (aOptions.mSampleRate.Value() < WebAudioUtils::MinSampleRate ||
       aOptions.mSampleRate.Value() > WebAudioUtils::MaxSampleRate)) {
    aRv.ThrowNotSupportedError(nsPrintfCString(
        "Sample rate %g is not in the range [%u, %u]",
        aOptions.mSampleRate.Value(), WebAudioUtils::MinSampleRate,
        WebAudioUtils::MaxSampleRate));
    return nullptr;
  }
  // No explicit rate means "use the graph's default".
  float sampleRate = aOptions.mSampleRate.WasPassed()
                         ? aOptions.mSampleRate.Value()
                         : MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE;

  // Realtime context: 2 channels, no fixed length.
  RefPtr<AudioContext> object =
      new AudioContext(window, false, 2, 0, sampleRate);

  // Weak registration; the matching unregister happens in the destructor.
  RegisterWeakMemoryReporter(object);

  return object.forget();
}
289
290 /* static */
Constructor(const GlobalObject & aGlobal,const OfflineAudioContextOptions & aOptions,ErrorResult & aRv)291 already_AddRefed<AudioContext> AudioContext::Constructor(
292 const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
293 ErrorResult& aRv) {
294 return Constructor(aGlobal, aOptions.mNumberOfChannels, aOptions.mLength,
295 aOptions.mSampleRate, aRv);
296 }
297
298 /* static */
/* static */
// WebIDL constructor for OfflineAudioContext(numberOfChannels, length,
// sampleRate). Validates all three arguments per spec before constructing.
already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, uint32_t aNumberOfChannels, uint32_t aLength,
    float aSampleRate, ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }
  /**
   * If the current settings object’s responsible document is NOT fully
   * active, throw an InvalidStateError and abort these steps.
   */
  if (!CheckFullyActive(window, aRv)) {
    return nullptr;
  }

  // Channel count must be in [1, MaxChannelCount].
  if (aNumberOfChannels == 0 ||
      aNumberOfChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowNotSupportedError(
        nsPrintfCString("%u is not a valid channel count", aNumberOfChannels));
    return nullptr;
  }

  // The rendered buffer must contain at least one frame.
  if (aLength == 0) {
    aRv.ThrowNotSupportedError("Length must be nonzero");
    return nullptr;
  }

  if (aSampleRate < WebAudioUtils::MinSampleRate ||
      aSampleRate > WebAudioUtils::MaxSampleRate) {
    // The DOM binding protects us against infinity and NaN
    aRv.ThrowNotSupportedError(nsPrintfCString(
        "Sample rate %g is not in the range [%u, %u]", aSampleRate,
        WebAudioUtils::MinSampleRate, WebAudioUtils::MaxSampleRate));
    return nullptr;
  }

  RefPtr<AudioContext> object =
      new AudioContext(window, true, aNumberOfChannels, aLength, aSampleRate);

  // Weak registration; the matching unregister happens in the destructor.
  RegisterWeakMemoryReporter(object);

  return object.forget();
}
344
CreateBufferSource()345 already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource() {
346 return AudioBufferSourceNode::Create(nullptr, *this,
347 AudioBufferSourceOptions());
348 }
349
CreateConstantSource()350 already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource() {
351 RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
352 return constantSourceNode.forget();
353 }
354
// Creates an empty AudioBuffer owned by this window. Throws
// NotSupportedError for zero channels; further validation (length, rate) is
// delegated to AudioBuffer::Create.
already_AddRefed<AudioBuffer> AudioContext::CreateBuffer(
    uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate,
    ErrorResult& aRv) {
  if (!aNumberOfChannels) {
    aRv.ThrowNotSupportedError("Number of channels must be nonzero");
    return nullptr;
  }

  return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength,
                             aSampleRate, aRv);
}
366
367 namespace {
368
// A ScriptProcessorNode buffer size is valid when it is 0 ("let the
// implementation choose") or a power of two between 256 and 16384 inclusive.
bool IsValidBufferSize(uint32_t aBufferSize) {
  if (aBufferSize == 0) {
    return true;
  }
  const bool isPowerOfTwo = (aBufferSize & (aBufferSize - 1)) == 0;
  return isPowerOfTwo && aBufferSize >= 256 && aBufferSize <= 16384;
}
384
385 } // namespace
386
387 already_AddRefed<MediaStreamAudioDestinationNode>
CreateMediaStreamDestination(ErrorResult & aRv)388 AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) {
389 return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(),
390 aRv);
391 }
392
// Legacy factory for the (deprecated) ScriptProcessorNode. Validates channel
// counts and buffer size per spec, throwing IndexSizeError on failure.
already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
    uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
    uint32_t aNumberOfOutputChannels, ErrorResult& aRv) {
  if (aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) {
    aRv.ThrowIndexSizeError(
        "At least one of numberOfInputChannels and numberOfOutputChannels must "
        "be nonzero");
    return nullptr;
  }

  if (aNumberOfInputChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowIndexSizeError(nsPrintfCString(
        "%u is not a valid number of input channels", aNumberOfInputChannels));
    return nullptr;
  }

  if (aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is not a valid number of output channels",
                        aNumberOfOutputChannels));
    return nullptr;
  }

  // Valid sizes are 0 (implementation-chosen) or powers of two in
  // [256, 16384]; see IsValidBufferSize above.
  if (!IsValidBufferSize(aBufferSize)) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is not a valid bufferSize", aBufferSize));
    return nullptr;
  }

  RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
      this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
  return scriptProcessor.forget();
}
426
CreateAnalyser(ErrorResult & aRv)427 already_AddRefed<AnalyserNode> AudioContext::CreateAnalyser(ErrorResult& aRv) {
428 return AnalyserNode::Create(*this, AnalyserOptions(), aRv);
429 }
430
CreateStereoPanner(ErrorResult & aRv)431 already_AddRefed<StereoPannerNode> AudioContext::CreateStereoPanner(
432 ErrorResult& aRv) {
433 return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv);
434 }
435
// Legacy factory method: wraps the media element in the options dictionary
// and defers to the options-based Create, which performs validation.
already_AddRefed<MediaElementAudioSourceNode>
AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
                                       ErrorResult& aRv) {
  MediaElementAudioSourceOptions options;
  options.mMediaElement = aMediaElement;

  return MediaElementAudioSourceNode::Create(*this, options, aRv);
}
444
// Legacy factory method: wraps the stream in the options dictionary and
// defers to the options-based Create, which performs validation.
already_AddRefed<MediaStreamAudioSourceNode>
AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
                                      ErrorResult& aRv) {
  MediaStreamAudioSourceOptions options;
  options.mMediaStream = aMediaStream;

  return MediaStreamAudioSourceNode::Create(*this, options, aRv);
}
453
// Legacy factory method: wraps the track in the options dictionary and
// defers to the options-based Create, which performs validation.
already_AddRefed<MediaStreamTrackAudioSourceNode>
AudioContext::CreateMediaStreamTrackSource(MediaStreamTrack& aMediaStreamTrack,
                                           ErrorResult& aRv) {
  MediaStreamTrackAudioSourceOptions options;
  options.mMediaStreamTrack = aMediaStreamTrack;

  return MediaStreamTrackAudioSourceNode::Create(*this, options, aRv);
}
462
CreateGain(ErrorResult & aRv)463 already_AddRefed<GainNode> AudioContext::CreateGain(ErrorResult& aRv) {
464 return GainNode::Create(*this, GainOptions(), aRv);
465 }
466
CreateWaveShaper(ErrorResult & aRv)467 already_AddRefed<WaveShaperNode> AudioContext::CreateWaveShaper(
468 ErrorResult& aRv) {
469 return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv);
470 }
471
// Legacy factory method: forwards maxDelayTime via the options dictionary;
// range validation happens in DelayNode::Create.
already_AddRefed<DelayNode> AudioContext::CreateDelay(double aMaxDelayTime,
                                                      ErrorResult& aRv) {
  DelayOptions options;
  options.mMaxDelayTime = aMaxDelayTime;
  return DelayNode::Create(*this, options, aRv);
}
478
CreatePanner(ErrorResult & aRv)479 already_AddRefed<PannerNode> AudioContext::CreatePanner(ErrorResult& aRv) {
480 return PannerNode::Create(*this, PannerOptions(), aRv);
481 }
482
CreateConvolver(ErrorResult & aRv)483 already_AddRefed<ConvolverNode> AudioContext::CreateConvolver(
484 ErrorResult& aRv) {
485 return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv);
486 }
487
// Legacy factory method: forwards numberOfOutputs via the options
// dictionary; validation happens in ChannelSplitterNode::Create.
already_AddRefed<ChannelSplitterNode> AudioContext::CreateChannelSplitter(
    uint32_t aNumberOfOutputs, ErrorResult& aRv) {
  ChannelSplitterOptions options;
  options.mNumberOfOutputs = aNumberOfOutputs;
  return ChannelSplitterNode::Create(*this, options, aRv);
}
494
// Legacy factory method: forwards numberOfInputs via the options dictionary;
// validation happens in ChannelMergerNode::Create.
already_AddRefed<ChannelMergerNode> AudioContext::CreateChannelMerger(
    uint32_t aNumberOfInputs, ErrorResult& aRv) {
  ChannelMergerOptions options;
  options.mNumberOfInputs = aNumberOfInputs;
  return ChannelMergerNode::Create(*this, options, aRv);
}
501
CreateDynamicsCompressor(ErrorResult & aRv)502 already_AddRefed<DynamicsCompressorNode> AudioContext::CreateDynamicsCompressor(
503 ErrorResult& aRv) {
504 return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(),
505 aRv);
506 }
507
CreateBiquadFilter(ErrorResult & aRv)508 already_AddRefed<BiquadFilterNode> AudioContext::CreateBiquadFilter(
509 ErrorResult& aRv) {
510 return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv);
511 }
512
// Legacy factory method: packs the filter coefficients into the options
// dictionary; coefficient validation happens in IIRFilterNode::Create.
already_AddRefed<IIRFilterNode> AudioContext::CreateIIRFilter(
    const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
    mozilla::ErrorResult& aRv) {
  IIRFilterOptions options;
  options.mFeedforward = aFeedforward;
  options.mFeedback = aFeedback;
  return IIRFilterNode::Create(*this, options, aRv);
}
521
CreateOscillator(ErrorResult & aRv)522 already_AddRefed<OscillatorNode> AudioContext::CreateOscillator(
523 ErrorResult& aRv) {
524 return OscillatorNode::Create(*this, OscillatorOptions(), aRv);
525 }
526
// Builds a PeriodicWave from the caller-supplied real/imaginary Fourier
// coefficient arrays. PeriodicWave's constructor reports mismatched or
// invalid lengths through aRv.
already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(
    const Float32Array& aRealData, const Float32Array& aImagData,
    const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv) {
  // ComputeState() must be called before Data()/Length() are read below.
  aRealData.ComputeState();
  aImagData.ComputeState();

  RefPtr<PeriodicWave> periodicWave = new PeriodicWave(
      this, aRealData.Data(), aRealData.Length(), aImagData.Data(),
      aImagData.Length(), aConstraints.mDisableNormalization, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  return periodicWave.forget();
}
541
Listener()542 AudioListener* AudioContext::Listener() {
543 if (!mListener) {
544 mListener = new AudioListener(this);
545 }
546 return mListener;
547 }
548
// Returns the audio output latency in seconds. After shutdown returns 0.
// With fingerprinting resistance enabled, returns a fixed per-platform value
// instead of the real graph latency.
double AudioContext::OutputLatency() {
  if (mIsShutDown) {
    return 0.0;
  }
  // When reduceFingerprinting is enabled, return a latency figure that is
  // fixed, but plausible for the platform.
  double latency_s = 0.0;
  if (StaticPrefs::privacy_resistFingerprinting()) {
#ifdef XP_MACOSX
    // Fixed number of frames converted to seconds at the context's rate.
    latency_s = 512. / mSampleRate;
#elif MOZ_WIDGET_ANDROID
    latency_s = 0.020;
#elif XP_WIN
    latency_s = 0.04;
#else  // Catchall for other OSes, including Linux.
    latency_s = 0.025;
#endif
  } else {
    return Graph()->AudioOutputLatency();
  }
  return latency_s;
}
571
// Fills aTimeStamp with the context time currently reaching the output and
// the corresponding Performance.now() time. Both fields are zero when the
// destination (or the performance object) is unavailable.
void AudioContext::GetOutputTimestamp(AudioTimestamp& aTimeStamp) {
  if (!Destination()) {
    aTimeStamp.mContextTime.Construct(0.0);
    aTimeStamp.mPerformanceTime.Construct(0.0);
    return;
  }

  // The currentTime currently being output is the currentTime minus the audio
  // output latency. The resolution of CurrentTime() is already reduced.
  aTimeStamp.mContextTime.Construct(
      std::max(0.0, CurrentTime() - OutputLatency()));
  nsPIDOMWindowInner* parent = GetParentObject();
  Performance* perf = parent ? parent->GetPerformance() : nullptr;
  if (perf) {
    // perf->Now() already has reduced resolution here, no need to do it again.
    // OutputLatency() is in seconds; perf->Now() is in milliseconds.
    aTimeStamp.mPerformanceTime.Construct(
        std::max(0., perf->Now() - (OutputLatency() * 1000.)));
  } else {
    aTimeStamp.mPerformanceTime.Construct(0.0);
  }
}
593
GetAudioWorklet(ErrorResult & aRv)594 Worklet* AudioContext::GetAudioWorklet(ErrorResult& aRv) {
595 if (!mWorklet) {
596 mWorklet = AudioWorkletImpl::CreateWorklet(this, aRv);
597 }
598
599 return mWorklet;
600 }
// True iff the context is in the "running" state.
bool AudioContext::IsRunning() const {
  return mAudioContextState == AudioContextState::Running;
}
604
// Creates a promise in this context's relevant global. If the document is
// not fully active the promise is returned pre-rejected with
// InvalidStateError, per spec.
already_AddRefed<Promise> AudioContext::CreatePromise(ErrorResult& aRv) {
  // Get the relevant global for the promise from the wrapper cache because
  // DOMEventTargetHelper::GetOwner() returns null if the document is unloaded.
  // We know the wrapper exists because it is being used for |this| from JS.
  // See https://github.com/heycam/webidl/issues/932 for why the relevant
  // global is used instead of the current global.
  nsCOMPtr<nsIGlobalObject> global = xpc::NativeGlobal(GetWrapper());
  RefPtr<Promise> promise = Promise::Create(global, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  /**
   * If this's relevant global object's associated Document is not fully
   * active then return a promise rejected with "InvalidStateError"
   * DOMException.
   */
  // NOTE(review): `window` is dereferenced without a null check below —
  // presumably the relevant global of an AudioContext is always a window;
  // confirm before relying on this elsewhere.
  nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(global);
  if (!window->IsFullyActive()) {
    promise->MaybeRejectWithInvalidStateError(
        "The document is not fully active.");
  }
  return promise.forget();
}
628
// Asynchronously decodes encoded audio data into an AudioBuffer. The input
// ArrayBuffer is detached (its contents are stolen) as required by the spec.
// Resolution/rejection is delivered via the returned promise and the
// optional success/failure callbacks.
already_AddRefed<Promise> AudioContext::DecodeAudioData(
    const ArrayBuffer& aBuffer,
    const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
    const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
    ErrorResult& aRv) {
  AutoJSAPI jsapi;
  jsapi.Init();
  JSContext* cx = jsapi.cx();

  // CheckedUnwrapStatic is OK, since we know we have an ArrayBuffer.
  JS::Rooted<JSObject*> obj(cx, js::CheckedUnwrapStatic(aBuffer.Obj()));
  if (!obj) {
    aRv.ThrowSecurityError("Can't get audio data from cross-origin object");
    return nullptr;
  }

  // A pre-rejected promise (document not fully active) is returned as-is
  // without attempting any decoding.
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  JSAutoRealm ar(cx, obj);
  // Must be called before Data()/Length() below are valid.
  aBuffer.ComputeState();

  if (!aBuffer.Data()) {
    // Throw if the buffer is detached
    aRv.ThrowTypeError("Buffer argument can't be a detached buffer");
    return nullptr;
  }

  // Detach the array buffer. Read the length before stealing the contents;
  // ownership of `data` passes to the decode machinery — presumably released
  // by WebAudioDecodeJob/AsyncDecodeWebAudio; verify there.
  size_t length = aBuffer.Length();

  uint8_t* data = static_cast<uint8_t*>(JS::StealArrayBufferContents(cx, obj));

  // Sniff the content of the media.
  // Failed type sniffing will be handled by AsyncDecodeWebAudio.
  nsAutoCString contentType;
  NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType);

  RefPtr<DecodeErrorCallback> failureCallback;
  RefPtr<DecodeSuccessCallback> successCallback;
  if (aFailureCallback.WasPassed()) {
    failureCallback = &aFailureCallback.Value();
  }
  if (aSuccessCallback.WasPassed()) {
    successCallback = &aSuccessCallback.Value();
  }
  UniquePtr<WebAudioDecodeJob> job(
      new WebAudioDecodeJob(this, promise, successCallback, failureCallback));
  AsyncDecodeWebAudio(contentType.get(), data, length, *job);
  // Transfer the ownership to mDecodeJobs
  mDecodeJobs.AppendElement(move(job));

  return promise.forget();
}
685
RemoveFromDecodeQueue(WebAudioDecodeJob * aDecodeJob)686 void AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) {
687 // Since UniquePtr doesn't provide an operator== which allows you to compare
688 // against raw pointers, we need to iterate manually.
689 for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
690 if (mDecodeJobs[i].get() == aDecodeJob) {
691 mDecodeJobs.RemoveElementAt(i);
692 break;
693 }
694 }
695 }
696
RegisterActiveNode(AudioNode * aNode)697 void AudioContext::RegisterActiveNode(AudioNode* aNode) {
698 if (!mCloseCalled) {
699 mActiveNodes.Insert(aNode);
700 }
701 }
702
// Counterpart to RegisterActiveNode; safe if aNode was never registered.
void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
  mActiveNodes.Remove(aNode);
}
706
MaxChannelCount() const707 uint32_t AudioContext::MaxChannelCount() const {
708 if (StaticPrefs::privacy_resistFingerprinting()) {
709 return 2;
710 }
711 return std::min<uint32_t>(
712 WebAudioUtils::MaxChannelCount,
713 mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
714 }
715
// Number of nodes currently registered as active on this context.
uint32_t AudioContext::ActiveNodeCount() const { return mActiveNodes.Count(); }
717
// The MediaTrackGraph this context renders on. Assumes Destination() and its
// track are non-null; see DestinationTrack() for the null-tolerant variant.
MediaTrackGraph* AudioContext::Graph() const {
  return Destination()->Track()->Graph();
}
721
DestinationTrack() const722 AudioNodeTrack* AudioContext::DestinationTrack() const {
723 if (Destination()) {
724 return Destination()->Track();
725 }
726 return nullptr;
727 }
728
ShutdownWorklet()729 void AudioContext::ShutdownWorklet() {
730 if (mWorklet) {
731 mWorklet->Impl()->NotifyWorkletFinished();
732 }
733 }
734
// Current playback position of the context in seconds, with timer-precision
// reduction applied when the render-quantum interval would not already mask
// the jitter.
double AudioContext::CurrentTime() {
  mozilla::MediaTrack* track = Destination()->Track();

  double rawTime = track->TrackTimeToSeconds(track->GetCurrentTime());

  // CurrentTime increments in intervals of 128/sampleRate. If the Timer
  // Precision Reduction is smaller than this interval, the jittered time
  // can always be reversed to the raw step of the interval. In that case
  // we can simply return the un-reduced time; and avoid breaking tests.
  // We have to convert each variable into a common magnitude, we choose ms.
  if ((128 / mSampleRate) * 1000.0 > nsRFPService::TimerResolution() / 1000.0) {
    return rawTime;
  }

  MOZ_ASSERT(GetParentObject()->AsGlobal());
  // The value of a MediaTrack's CurrentTime will always advance forward; it
  // will never reset (even if one rewinds a video.) Therefore we can use a
  // single Random Seed initialized at the same time as the object.
  return nsRFPService::ReduceTimePrecisionAsSecs(
      rawTime, GetRandomTimelineSeed(),
      /* aIsSystemPrincipal */ false,
      GetParentObject()->AsGlobal()->CrossOriginIsolated());
}
758
GetMainThread() const759 nsISerialEventTarget* AudioContext::GetMainThread() const {
760 if (nsPIDOMWindowInner* window = GetParentObject()) {
761 return window->AsGlobal()->EventTargetFor(TaskCategory::Other);
762 }
763
764 return GetCurrentSerialEventTarget();
765 }
766
// Called when the owning window goes away. mIsDisconnecting is set first so
// that OnWindowDestroy() skips touching promises (see there), then the base
// class disconnect runs.
void AudioContext::DisconnectFromOwner() {
  mIsDisconnecting = true;
  OnWindowDestroy();
  DOMEventTargetHelper::DisconnectFromOwner();
}
772
// Tears the context down when its window is destroyed: closes the context,
// rejects outstanding promises (unless the global itself is going away),
// shuts down the worklet, and force-shuts-down the graph.
void AudioContext::OnWindowDestroy() {
  // Avoid resend the Telemetry data.
  if (!mIsShutDown) {
    MaybeUpdateAutoplayTelemetryWhenShutdown();
  }
  mIsShutDown = true;

  // Close without a promise and without dispatching a statechange event.
  CloseInternal(nullptr, AudioContextOperationFlags::None);

  // We don't want to touch promises if the global is going away soon.
  if (!mIsDisconnecting) {
    for (auto p : mPromiseGripArray) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }

    mPromiseGripArray.Clear();

    for (const auto& p : mPendingResumePromises) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }
    mPendingResumePromises.Clear();
  }

  // On process shutdown, the MTG thread shuts down before the destination
  // track is destroyed, but AudioWorklet needs to release objects on the MTG
  // thread. AudioContext::Shutdown() is invoked on processing the
  // PBrowser::Destroy() message before xpcom shutdown begins.
  ShutdownWorklet();

  if (mDestination) {
    // We can destroy the MediaTrackGraph at this point.
    // Although there may be other clients using the graph, this graph is used
    // only for clients in the same window and this window is going away.
    // This will also interrupt any worklet script still running on the graph
    // thread.
    Graph()->ForceShutDown();
    // AudioDestinationNodes on rendering offline contexts have a
    // self-reference which needs removal.
    if (mIsOffline) {
      mDestination->OfflineShutdown();
    }
  }
}
816
/* This runnable allows to fire the "statechange" event */
class OnStateChangeTask final : public Runnable {
 public:
  // Holds a strong reference to the context until the event has fired.
  explicit OnStateChangeTask(AudioContext* aAudioContext)
      : Runnable("dom::OnStateChangeTask"), mAudioContext(aAudioContext) {}

  // Dispatches a trusted, non-bubbling, non-cancelable "statechange" event
  // at the context. Fails if the window or its document is already gone.
  NS_IMETHODIMP
  Run() override {
    nsPIDOMWindowInner* parent = mAudioContext->GetParentObject();
    if (!parent) {
      return NS_ERROR_FAILURE;
    }

    Document* doc = parent->GetExtantDoc();
    if (!doc) {
      return NS_ERROR_FAILURE;
    }

    return nsContentUtils::DispatchTrustedEvent(
        doc, static_cast<DOMEventTargetHelper*>(mAudioContext),
        u"statechange"_ns, CanBubble::eNo, Cancelable::eNo);
  }

 private:
  RefPtr<AudioContext> mAudioContext;
};
843
Dispatch(already_AddRefed<nsIRunnable> && aRunnable)844 void AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
845 MOZ_ASSERT(NS_IsMainThread());
846 nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
847 // It can happen that this runnable took a long time to reach the main thread,
848 // and the global is not valid anymore.
849 if (parentObject) {
850 parentObject->AbstractMainThreadFor(TaskCategory::Other)
851 ->Dispatch(move(aRunnable));
852 } else {
853 RefPtr<nsIRunnable> runnable(aRunnable);
854 runnable = nullptr;
855 }
856 }
857
// Main-thread callback invoked when a graph state operation completed.
// aPromise (may be null) is the content-visible promise that initiated the
// transition, passed as void* and only dereferenced after verifying it is
// still gripped; aNewState is the state the context has now reached.
void AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState) {
  MOZ_ASSERT(NS_IsMainThread());

  // A closed context is terminal; any further transition is a bug.
  if (mAudioContextState == AudioContextState::Closed) {
    fprintf(stderr,
            "Invalid transition: mAudioContextState: %d -> aNewState %d\n",
            static_cast<int>(mAudioContextState), static_cast<int>(aNewState));
    MOZ_ASSERT(false);
  }

  if (aPromise) {
    Promise* promise = reinterpret_cast<Promise*>(aPromise);
    // It is possible for the promise to have been removed from
    // mPromiseGripArray if the cycle collector has severed our connections. DO
    // NOT dereference the promise pointer in that case since it may point to
    // already freed memory.
    if (mPromiseGripArray.Contains(promise)) {
      promise->MaybeResolveWithUndefined();
      DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
      MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
    }
  }

  // Resolve all pending promises once the audio context has been allowed to
  // start.
  if (aNewState == AudioContextState::Running) {
    for (const auto& p : mPendingResumePromises) {
      p->MaybeResolveWithUndefined();
    }
    mPendingResumePromises.Clear();
  }

  // Fire "statechange" only on an actual change; note this check must happen
  // before mAudioContextState is overwritten below.
  if (mAudioContextState != aNewState) {
    RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
    Dispatch(task.forget());
  }

  mAudioContextState = aNewState;
  Destination()->NotifyAudioContextStateChanged();
}
898
GetAllTracks() const899 nsTArray<RefPtr<mozilla::MediaTrack>> AudioContext::GetAllTracks() const {
900 nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
901 for (AudioNode* node : mAllNodes) {
902 mozilla::MediaTrack* t = node->GetTrack();
903 if (t) {
904 tracks.AppendElement(t);
905 }
906 // Add the tracks of AudioParam.
907 const nsTArray<RefPtr<AudioParam>>& audioParams = node->GetAudioParams();
908 if (!audioParams.IsEmpty()) {
909 for (auto& param : audioParams) {
910 t = param->GetTrack();
911 if (t && !tracks.Contains(t)) {
912 tracks.AppendElement(t);
913 }
914 }
915 }
916 }
917 return tracks;
918 }
919
Suspend(ErrorResult & aRv)920 already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
921 RefPtr<Promise> promise = CreatePromise(aRv);
922 if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
923 return promise.forget();
924 }
925 if (mIsOffline) {
926 // XXXbz This is not reachable, since we don't implement this
927 // method on OfflineAudioContext at all!
928 promise->MaybeRejectWithNotSupportedError(
929 "Can't suspend OfflineAudioContext yet");
930 return promise.forget();
931 }
932
933 if (mCloseCalled) {
934 promise->MaybeRejectWithInvalidStateError(
935 "Can't suspend if the control thread state is \"closed\"");
936 return promise.forget();
937 }
938
939 mSuspendedByContent = true;
940 mPromiseGripArray.AppendElement(promise);
941 SuspendInternal(promise, AudioContextOperationFlags::SendStateChange);
942 return promise.forget();
943 }
944
SuspendFromChrome()945 void AudioContext::SuspendFromChrome() {
946 if (mIsOffline || mIsShutDown) {
947 return;
948 }
949 SuspendInternal(nullptr, Preferences::GetBool("dom.audiocontext.testing")
950 ? AudioContextOperationFlags::SendStateChange
951 : AudioContextOperationFlags::None);
952 }
953
SuspendInternal(void * aPromise,AudioContextOperationFlags aFlags)954 void AudioContext::SuspendInternal(void* aPromise,
955 AudioContextOperationFlags aFlags) {
956 MOZ_ASSERT(NS_IsMainThread());
957 MOZ_ASSERT(!mIsOffline);
958 Destination()->Suspend();
959
960 nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
961 // If mSuspendCalled is true then we already suspended all our tracks,
962 // so don't suspend them again (since suspend(); suspend(); resume(); should
963 // cancel both suspends). But we still need to do ApplyAudioContextOperation
964 // to ensure our new promise is resolved.
965 if (!mSuspendCalled) {
966 tracks = GetAllTracks();
967 }
968 auto promise = Graph()->ApplyAudioContextOperation(
969 DestinationTrack(), move(tracks), AudioContextOperation::Suspend);
970 if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
971 promise->Then(
972 GetMainThread(), "AudioContext::OnStateChanged",
973 [self = RefPtr<AudioContext>(this),
974 aPromise](AudioContextState aNewState) {
975 self->OnStateChanged(aPromise, aNewState);
976 },
977 [] { MOZ_CRASH("Unexpected rejection"); });
978 }
979
980 mSuspendCalled = true;
981 }
982
ResumeFromChrome()983 void AudioContext::ResumeFromChrome() {
984 if (mIsOffline || mIsShutDown) {
985 return;
986 }
987 ResumeInternal(Preferences::GetBool("dom.audiocontext.testing")
988 ? AudioContextOperationFlags::SendStateChange
989 : AudioContextOperationFlags::None);
990 }
991
Resume(ErrorResult & aRv)992 already_AddRefed<Promise> AudioContext::Resume(ErrorResult& aRv) {
993 RefPtr<Promise> promise = CreatePromise(aRv);
994 if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
995 return promise.forget();
996 }
997
998 if (mIsOffline) {
999 promise->MaybeRejectWithNotSupportedError(
1000 "Can't resume OfflineAudioContext");
1001 return promise.forget();
1002 }
1003
1004 if (mCloseCalled) {
1005 promise->MaybeRejectWithInvalidStateError(
1006 "Can't resume if the control thread state is \"closed\"");
1007 return promise.forget();
1008 }
1009
1010 mSuspendedByContent = false;
1011 mPendingResumePromises.AppendElement(promise);
1012
1013 const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
1014 AUTOPLAY_LOG("Trying to resume AudioContext %p, IsAllowedToPlay=%d", this,
1015 isAllowedToPlay);
1016 if (isAllowedToPlay) {
1017 ResumeInternal(AudioContextOperationFlags::SendStateChange);
1018 } else {
1019 ReportBlocked();
1020 }
1021
1022 MaybeUpdateAutoplayTelemetry();
1023
1024 return promise.forget();
1025 }
1026
ResumeInternal(AudioContextOperationFlags aFlags)1027 void AudioContext::ResumeInternal(AudioContextOperationFlags aFlags) {
1028 MOZ_ASSERT(!mIsOffline);
1029 AUTOPLAY_LOG("Allow to resume AudioContext %p", this);
1030 mWasAllowedToStart = true;
1031
1032 Destination()->Resume();
1033
1034 nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
1035 // If mSuspendCalled is false then we already resumed all our tracks,
1036 // so don't resume them again (since suspend(); resume(); resume(); should
1037 // be OK). But we still need to do ApplyAudioContextOperation
1038 // to ensure our new promise is resolved.
1039 if (mSuspendCalled) {
1040 tracks = GetAllTracks();
1041 }
1042 auto promise = Graph()->ApplyAudioContextOperation(
1043 DestinationTrack(), move(tracks), AudioContextOperation::Resume);
1044 if (aFlags & AudioContextOperationFlags::SendStateChange) {
1045 promise->Then(
1046 GetMainThread(), "AudioContext::OnStateChanged",
1047 [self = RefPtr<AudioContext>(this)](AudioContextState aNewState) {
1048 self->OnStateChanged(nullptr, aNewState);
1049 },
1050 [] {}); // Promise may be rejected after graph shutdown.
1051 }
1052 mSuspendCalled = false;
1053 }
1054
UpdateAutoplayAssumptionStatus()1055 void AudioContext::UpdateAutoplayAssumptionStatus() {
1056 if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
1057 *this)) {
1058 mWasEverAllowedToStart |= true;
1059 mWouldBeAllowedToStart = true;
1060 } else {
1061 mWasEverBlockedToStart |= true;
1062 mWouldBeAllowedToStart = false;
1063 }
1064 }
1065
MaybeUpdateAutoplayTelemetry()1066 void AudioContext::MaybeUpdateAutoplayTelemetry() {
1067 // Exclude offline AudioContext because it's always allowed to start.
1068 if (mIsOffline) {
1069 return;
1070 }
1071
1072 if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
1073 *this) &&
1074 !mWouldBeAllowedToStart) {
1075 AccumulateCategorical(
1076 mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::AllowedAfterBlocked);
1077 }
1078 UpdateAutoplayAssumptionStatus();
1079 }
1080
MaybeUpdateAutoplayTelemetryWhenShutdown()1081 void AudioContext::MaybeUpdateAutoplayTelemetryWhenShutdown() {
1082 // Exclude offline AudioContext because it's always allowed to start.
1083 if (mIsOffline) {
1084 return;
1085 }
1086
1087 if (mWasEverAllowedToStart && !mWasEverBlockedToStart) {
1088 AccumulateCategorical(
1089 mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverBlocked);
1090 } else if (!mWasEverAllowedToStart && mWasEverBlockedToStart) {
1091 AccumulateCategorical(
1092 mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverAllowed);
1093 }
1094 }
1095
ReportBlocked()1096 void AudioContext::ReportBlocked() {
1097 ReportToConsole(nsIScriptError::warningFlag,
1098 "BlockAutoplayWebAudioStartError");
1099 mWasAllowedToStart = false;
1100
1101 if (!StaticPrefs::media_autoplay_block_event_enabled()) {
1102 return;
1103 }
1104
1105 RefPtr<AudioContext> self = this;
1106 RefPtr<nsIRunnable> r =
1107 NS_NewRunnableFunction("AudioContext::AutoplayBlocked", [self]() {
1108 nsPIDOMWindowInner* parent = self->GetParentObject();
1109 if (!parent) {
1110 return;
1111 }
1112
1113 Document* doc = parent->GetExtantDoc();
1114 if (!doc) {
1115 return;
1116 }
1117
1118 AUTOPLAY_LOG("Dispatch `blocked` event for AudioContext %p",
1119 self.get());
1120 nsContentUtils::DispatchTrustedEvent(
1121 doc, static_cast<DOMEventTargetHelper*>(self), u"blocked"_ns,
1122 CanBubble::eNo, Cancelable::eNo);
1123 });
1124 Dispatch(r.forget());
1125 }
1126
Close(ErrorResult & aRv)1127 already_AddRefed<Promise> AudioContext::Close(ErrorResult& aRv) {
1128 RefPtr<Promise> promise = CreatePromise(aRv);
1129 if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
1130 return promise.forget();
1131 }
1132
1133 if (mIsOffline) {
1134 // XXXbz This is not reachable, since we don't implement this
1135 // method on OfflineAudioContext at all!
1136 promise->MaybeRejectWithNotSupportedError(
1137 "Can't close OfflineAudioContext yet");
1138 return promise.forget();
1139 }
1140
1141 if (mCloseCalled) {
1142 promise->MaybeRejectWithInvalidStateError(
1143 "Can't close an AudioContext twice");
1144 return promise.forget();
1145 }
1146
1147 mPromiseGripArray.AppendElement(promise);
1148
1149 CloseInternal(promise, AudioContextOperationFlags::SendStateChange);
1150
1151 return promise.forget();
1152 }
1153
// Close an offline context's graph with no promise and no "statechange"
// notification.
void AudioContext::OfflineClose() {
  CloseInternal(nullptr, AudioContextOperationFlags::None);
}
1157
// Shared implementation behind Close(), OfflineClose() and Shutdown().
// aPromise (may be null) is forwarded to OnStateChanged() when aFlags
// requests a state-change notification.
void AudioContext::CloseInternal(void* aPromise,
                                 AudioContextOperationFlags aFlags) {
  // This can be called when freeing a document, and the tracks are dead at
  // this point, so we need extra null-checks.
  AudioNodeTrack* ds = DestinationTrack();
  if (ds && !mIsOffline) {
    Destination()->Close();

    nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
    // If mSuspendCalled or mCloseCalled are true then we already suspended
    // all our tracks, so don't suspend them again. But we still need to do
    // ApplyAudioContextOperation to ensure our new promise is resolved.
    if (!mSuspendCalled && !mCloseCalled) {
      tracks = GetAllTracks();
    }
    auto promise = Graph()->ApplyAudioContextOperation(
        ds, move(tracks), AudioContextOperation::Close);
    if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
      promise->Then(
          GetMainThread(), "AudioContext::OnStateChanged",
          [self = RefPtr<AudioContext>(this),
           aPromise](AudioContextState aNewState) {
            self->OnStateChanged(aPromise, aNewState);
          },
          [] {});  // Promise may be rejected after graph shutdown.
    }
  }
  // Set even when the graph was not touched, so later Suspend/Resume/Close
  // calls see the context as closed.
  mCloseCalled = true;
  // Release references to active nodes.
  // Active AudioNodes don't unregister in destructors, at which point the
  // Node is already unregistered.
  mActiveNodes.Clear();
}
1191
// Track a newly created AudioNode so GetAllTracks() can enumerate its
// MediaTrack.  A node must be registered at most once.
void AudioContext::RegisterNode(AudioNode* aNode) {
  MOZ_ASSERT(!mAllNodes.Contains(aNode));
  mAllNodes.Insert(aNode);
}
1196
// Remove a node previously added via RegisterNode(); the node must still be
// registered when this is called.
void AudioContext::UnregisterNode(AudioNode* aNode) {
  MOZ_ASSERT(mAllNodes.Contains(aNode));
  mAllNodes.Remove(aNode);
}
1201
StartRendering(ErrorResult & aRv)1202 already_AddRefed<Promise> AudioContext::StartRendering(ErrorResult& aRv) {
1203 MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
1204 RefPtr<Promise> promise = CreatePromise(aRv);
1205 if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
1206 return promise.forget();
1207 }
1208 if (mIsStarted) {
1209 aRv.ThrowInvalidStateError("Rendering already started");
1210 return nullptr;
1211 }
1212
1213 mIsStarted = true;
1214 mDestination->StartRendering(promise);
1215
1216 OnStateChanged(nullptr, AudioContextState::Running);
1217
1218 return promise.forget();
1219 }
1220
// Rendering length of an offline context, forwarded from the destination
// node.  Only valid on OfflineAudioContext.
unsigned long AudioContext::Length() {
  MOZ_ASSERT(mIsOffline);
  return mDestination->Length();
}
1225
// Mute the destination node of a real-time context; no-op when the
// destination is already gone.
void AudioContext::Mute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Mute();
  }
}
1232
// Undo Mute(); no-op when the destination is already gone.
void AudioContext::Unmute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Unmute();
  }
}
1239
// Store the AudioParam descriptor map for the given worklet name, moving it
// out of *aParamMap.  Each name may be registered only once; insertion
// failure (fallible allocation) is deliberately ignored.
void AudioContext::SetParamMapForWorkletName(
    const nsAString& aName, AudioParamDescriptorMap* aParamMap) {
  MOZ_ASSERT(!mWorkletParamDescriptors.Contains(aName));
  Unused << mWorkletParamDescriptors.InsertOrUpdate(aName, move(*aParamMap),
                                                    fallible);
}
1246
SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const1247 size_t AudioContext::SizeOfIncludingThis(
1248 mozilla::MallocSizeOf aMallocSizeOf) const {
1249 // AudioNodes are tracked separately because we do not want the AudioContext
1250 // to track all of the AudioNodes it creates, so we wouldn't be able to
1251 // traverse them from here.
1252
1253 size_t amount = aMallocSizeOf(this);
1254 if (mListener) {
1255 amount += mListener->SizeOfIncludingThis(aMallocSizeOf);
1256 }
1257 amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf);
1258 for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
1259 amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf);
1260 }
1261 amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
1262 return amount;
1263 }
1264
1265 NS_IMETHODIMP
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1266 AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
1267 nsISupports* aData, bool aAnonymize) {
1268 const nsLiteralCString nodeDescription(
1269 "Memory used by AudioNode DOM objects (Web Audio).");
1270 for (AudioNode* node : mAllNodes) {
1271 int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
1272 nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes",
1273 node->NodeType());
1274 aHandleReport->Callback(""_ns, domNodePath, KIND_HEAP, UNITS_BYTES, amount,
1275 nodeDescription, aData);
1276 }
1277
1278 int64_t amount = SizeOfIncludingThis(MallocSizeOf);
1279 MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
1280 amount,
1281 "Memory used by AudioContext objects (Web Audio).");
1282
1283 return NS_OK;
1284 }
1285
GetBasicWaveFormCache()1286 BasicWaveFormCache* AudioContext::GetBasicWaveFormCache() {
1287 MOZ_ASSERT(NS_IsMainThread());
1288 if (!mBasicWaveFormCache) {
1289 mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
1290 }
1291 return mBasicWaveFormCache;
1292 }
1293
ReportToConsole(uint32_t aErrorFlags,const char * aMsg) const1294 void AudioContext::ReportToConsole(uint32_t aErrorFlags,
1295 const char* aMsg) const {
1296 MOZ_ASSERT(aMsg);
1297 Document* doc =
1298 GetParentObject() ? GetParentObject()->GetExtantDoc() : nullptr;
1299 nsContentUtils::ReportToConsole(aErrorFlags, "Media"_ns, doc,
1300 nsContentUtils::eDOM_PROPERTIES, aMsg);
1301 }
1302
// Cache of the basic oscillator waveforms (sawtooth/square/triangle) for a
// fixed sample rate.  Constructed on the main thread; queried off the main
// thread (see GetBasicWaveForm).
BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate)
    : mSampleRate(aSampleRate) {
  MOZ_ASSERT(NS_IsMainThread());
}
BasicWaveFormCache::~BasicWaveFormCache() = default;
1308
GetBasicWaveForm(OscillatorType aType)1309 WebCore::PeriodicWave* BasicWaveFormCache::GetBasicWaveForm(
1310 OscillatorType aType) {
1311 MOZ_ASSERT(!NS_IsMainThread());
1312 if (aType == OscillatorType::Sawtooth) {
1313 if (!mSawtooth) {
1314 mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate);
1315 }
1316 return mSawtooth;
1317 }
1318 if (aType == OscillatorType::Square) {
1319 if (!mSquare) {
1320 mSquare = WebCore::PeriodicWave::createSquare(mSampleRate);
1321 }
1322 return mSquare;
1323 }
1324 if (aType == OscillatorType::Triangle) {
1325 if (!mTriangle) {
1326 mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate);
1327 }
1328 return mTriangle;
1329 }
1330 MOZ_ASSERT(false, "Not reached");
1331 return nullptr;
1332 }
1333
1334 } // namespace mozilla::dom
1335