/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioContext.h"

#include "blink/PeriodicWave.h"

#include "mozilla/ErrorResult.h"
#include "mozilla/OwningNonNull.h"
#include "mozilla/RefPtr.h"

#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/AnalyserNodeBinding.h"
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
#include "mozilla/dom/AudioContextBinding.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/BiquadFilterNodeBinding.h"
#include "mozilla/dom/ChannelMergerNodeBinding.h"
#include "mozilla/dom/ChannelSplitterNodeBinding.h"
#include "mozilla/dom/ConvolverNodeBinding.h"
#include "mozilla/dom/DelayNodeBinding.h"
#include "mozilla/dom/DynamicsCompressorNodeBinding.h"
#include "mozilla/dom/GainNodeBinding.h"
#include "mozilla/dom/IIRFilterNodeBinding.h"
#include "mozilla/dom/HTMLMediaElement.h"
#include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
#include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
#include "mozilla/dom/OfflineAudioContextBinding.h"
#include "mozilla/dom/OscillatorNodeBinding.h"
#include "mozilla/dom/PannerNodeBinding.h"
#include "mozilla/dom/PeriodicWaveBinding.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/StereoPannerNodeBinding.h"
#include "mozilla/dom/WaveShaperNodeBinding.h"

#include "AudioBuffer.h"
#include "AudioBufferSourceNode.h"
#include "AudioChannelService.h"
#include "AudioDestinationNode.h"
#include "AudioListener.h"
#include "AudioNodeStream.h"
#include "AudioStream.h"
#include "BiquadFilterNode.h"
#include "ChannelMergerNode.h"
#include "ChannelSplitterNode.h"
#include "ConstantSourceNode.h"
#include "ConvolverNode.h"
#include "DelayNode.h"
#include "DynamicsCompressorNode.h"
#include "GainNode.h"
#include "IIRFilterNode.h"
#include "MediaElementAudioSourceNode.h"
#include "MediaStreamAudioDestinationNode.h"
#include "MediaStreamAudioSourceNode.h"
#include "MediaStreamGraph.h"
#include "nsContentUtils.h"
#include "nsNetCID.h"
#include "nsNetUtil.h"
#include "nsPIDOMWindow.h"
#include "nsPrintfCString.h"
#include "nsRFPService.h"
#include "OscillatorNode.h"
#include "PannerNode.h"
#include "PeriodicWave.h"
#include "ScriptProcessorNode.h"
#include "StereoPannerNode.h"
#include "WaveShaperNode.h"

namespace mozilla {
namespace dom {

// 0 is a special value that MediaStreams use to denote they are not part of
// an AudioContext.
static dom::AudioContext::AudioContextId gAudioContextId = 1;

NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
  if (!tmp->mIsStarted) {
    NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly. mAllNodes is an array of weak pointers, ignore it here.
  // mPannerNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.

  // Remove weak reference on the global window as the context is not usable
  // without mDestination.
  tmp->DisconnectFromWindow();
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                  DOMEventTargetHelper)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
  if (!tmp->mIsStarted) {
    MOZ_ASSERT(tmp->mIsOffline,
               "Online AudioContexts should always be started");
    NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly. mAllNodes is an array of weak pointers, ignore it here.
  // mPannerNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext)
  NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)

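// Offline contexts render at whatever rate the caller asked for; real-time
// contexts ignore the requested rate and use the hardware-preferred rate
// reported by cubeb instead.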
static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) {
  if (aIsOffline) {
    return aSampleRate;
  } else {
    return static_cast<float>(CubebUtils::PreferredSampleRate());
  }
}

AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, bool aIsOffline,
                           uint32_t aNumberOfChannels, uint32_t aLength,
                           float aSampleRate)
    : DOMEventTargetHelper(aWindow),
      mId(gAudioContextId++),
      mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)),
      mAudioContextState(AudioContextState::Suspended),
      mNumberOfChannels(aNumberOfChannels),
      mIsOffline(aIsOffline),
      mIsStarted(!aIsOffline),
      mIsShutDown(false),
      mCloseCalled(false),
      mSuspendCalled(false),
      mIsDisconnecting(false) {
  bool mute = aWindow->AddAudioContext(this);

  // Note: AudioDestinationNode needs an AudioContext that must already be
  // bound to the window.
  mDestination = new AudioDestinationNode(this, aIsOffline, aNumberOfChannels,
                                          aLength, aSampleRate);

  // The context can't be muted until it has a destination.
  if (mute) {
    Mute();
  }
}

nsresult AudioContext::Init() {
  if (!mIsOffline) {
    nsresult rv = mDestination->CreateAudioChannelAgent();
    if (NS_WARN_IF(NS_FAILED(rv))) {
      return rv;
    }
  }

  return NS_OK;
}

void AudioContext::DisconnectFromWindow() {
  nsPIDOMWindowInner* window = GetOwner();
  if (window) {
    window->RemoveAudioContext(this);
  }
}

AudioContext::~AudioContext() {
  DisconnectFromWindow();
  UnregisterWeakMemoryReporter(this);
}

JSObject* AudioContext::WrapObject(JSContext* aCx,
                                   JS::Handle<JSObject*> aGivenProto) {
  if (mIsOffline) {
    return OfflineAudioContextBinding::Wrap(aCx, this, aGivenProto);
  } else {
    return AudioContextBinding::Wrap(aCx, this, aGivenProto);
  }
}

/* static */ already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  uint32_t maxChannelCount = std::min<uint32_t>(
      WebAudioUtils::MaxChannelCount, CubebUtils::MaxNumberOfChannels());
  RefPtr<AudioContext> object =
      new AudioContext(window, false, maxChannelCount);
  aRv = object->Init();
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  RegisterWeakMemoryReporter(object);

  return object.forget();
}

/* static */ already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
    ErrorResult& aRv) {
  return Constructor(aGlobal, aOptions.mNumberOfChannels, aOptions.mLength,
                     aOptions.mSampleRate, aRv);
}

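// Shared backend for `new OfflineAudioContext(numberOfChannels, length,
// sampleRate)` and for the options-dictionary overload above. The checks
// below mirror the Web Audio spec: the channel count must be between 1 and
// WebAudioUtils::MaxChannelCount, the length must be non-zero, and the sample
// rate must fall within [MinSampleRate, MaxSampleRate]; anything else is
// reported as NotSupportedError.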
/* static */ already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, uint32_t aNumberOfChannels, uint32_t aLength,
    float aSampleRate, ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  if (aNumberOfChannels == 0 ||
      aNumberOfChannels > WebAudioUtils::MaxChannelCount || aLength == 0 ||
      aSampleRate < WebAudioUtils::MinSampleRate ||
      aSampleRate > WebAudioUtils::MaxSampleRate) {
    // The DOM binding protects us against infinity and NaN
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return nullptr;
  }

  RefPtr<AudioContext> object =
      new AudioContext(window, true, aNumberOfChannels, aLength, aSampleRate);

  RegisterWeakMemoryReporter(object);

  return object.forget();
}

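// Returns true (and throws InvalidStateError on aRv) once the context can no
// longer create or run nodes: after close(), after shutdown, or while the
// owning global is being disconnected. Node factory methods such as
// CreateConstantSource() and CreateScriptProcessor() check this before
// constructing anything.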
bool AudioContext::CheckClosed(ErrorResult& aRv) {
  if (mAudioContextState == AudioContextState::Closed || mIsShutDown ||
      mIsDisconnecting) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return true;
  }
  return false;
}

already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource(
    ErrorResult& aRv) {
  return AudioBufferSourceNode::Create(nullptr, *this,
                                       AudioBufferSourceOptions(), aRv);
}

already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource(
    ErrorResult& aRv) {
  if (CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
  return constantSourceNode.forget();
}

already_AddRefed<AudioBuffer> AudioContext::CreateBuffer(
    uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate,
    ErrorResult& aRv) {
  if (!aNumberOfChannels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength,
                             aSampleRate, aRv);
}

namespace {

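// Helper for CreateScriptProcessor(): only power-of-two buffer sizes between
// 256 and 16384 frames are accepted; 0 is a sentinel meaning "let the
// implementation choose".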
bool IsValidBufferSize(uint32_t aBufferSize) {
  switch (aBufferSize) {
    case 0:  // let the implementation choose the buffer size
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
    case 8192:
    case 16384:
      return true;
    default:
      return false;
  }
}

}  // namespace

already_AddRefed<MediaStreamAudioDestinationNode>
AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) {
  return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(),
                                                 aRv);
}

already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
    uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
    uint32_t aNumberOfOutputChannels, ErrorResult& aRv) {
  if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) ||
      aNumberOfInputChannels > WebAudioUtils::MaxChannelCount ||
      aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount ||
      !IsValidBufferSize(aBufferSize)) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  if (CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
      this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
  return scriptProcessor.forget();
}

already_AddRefed<AnalyserNode> AudioContext::CreateAnalyser(ErrorResult& aRv) {
  return AnalyserNode::Create(*this, AnalyserOptions(), aRv);
}

already_AddRefed<StereoPannerNode> AudioContext::CreateStereoPanner(
    ErrorResult& aRv) {
  return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv);
}

already_AddRefed<MediaElementAudioSourceNode>
AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
                                       ErrorResult& aRv) {
  MediaElementAudioSourceOptions options;
  options.mMediaElement = aMediaElement;

  return MediaElementAudioSourceNode::Create(*this, options, aRv);
}

already_AddRefed<MediaStreamAudioSourceNode>
AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
                                      ErrorResult& aRv) {
  MediaStreamAudioSourceOptions options;
  options.mMediaStream = aMediaStream;

  return MediaStreamAudioSourceNode::Create(*this, options, aRv);
}

already_AddRefed<GainNode> AudioContext::CreateGain(ErrorResult& aRv) {
  return GainNode::Create(*this, GainOptions(), aRv);
}

already_AddRefed<WaveShaperNode> AudioContext::CreateWaveShaper(
    ErrorResult& aRv) {
  return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv);
}

already_AddRefed<DelayNode> AudioContext::CreateDelay(double aMaxDelayTime,
                                                      ErrorResult& aRv) {
  DelayOptions options;
  options.mMaxDelayTime = aMaxDelayTime;
  return DelayNode::Create(*this, options, aRv);
}

already_AddRefed<PannerNode> AudioContext::CreatePanner(ErrorResult& aRv) {
  return PannerNode::Create(*this, PannerOptions(), aRv);
}

already_AddRefed<ConvolverNode> AudioContext::CreateConvolver(
    ErrorResult& aRv) {
  return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv);
}

already_AddRefed<ChannelSplitterNode> AudioContext::CreateChannelSplitter(
    uint32_t aNumberOfOutputs, ErrorResult& aRv) {
  ChannelSplitterOptions options;
  options.mNumberOfOutputs = aNumberOfOutputs;
  return ChannelSplitterNode::Create(*this, options, aRv);
}

already_AddRefed<ChannelMergerNode> AudioContext::CreateChannelMerger(
    uint32_t aNumberOfInputs, ErrorResult& aRv) {
  ChannelMergerOptions options;
  options.mNumberOfInputs = aNumberOfInputs;
  return ChannelMergerNode::Create(*this, options, aRv);
}

already_AddRefed<DynamicsCompressorNode> AudioContext::CreateDynamicsCompressor(
    ErrorResult& aRv) {
  return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(),
                                        aRv);
}

already_AddRefed<BiquadFilterNode> AudioContext::CreateBiquadFilter(
    ErrorResult& aRv) {
  return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv);
}

already_AddRefed<IIRFilterNode> AudioContext::CreateIIRFilter(
    const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
    mozilla::ErrorResult& aRv) {
  IIRFilterOptions options;
  options.mFeedforward = aFeedforward;
  options.mFeedback = aFeedback;
  return IIRFilterNode::Create(*this, options, aRv);
}

already_AddRefed<OscillatorNode> AudioContext::CreateOscillator(
    ErrorResult& aRv) {
  return OscillatorNode::Create(*this, OscillatorOptions(), aRv);
}

already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(
    const Float32Array& aRealData, const Float32Array& aImagData,
    const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv) {
  aRealData.ComputeLengthAndData();
  aImagData.ComputeLengthAndData();

  if (aRealData.Length() != aImagData.Length() || aRealData.Length() == 0) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  RefPtr<PeriodicWave> periodicWave = new PeriodicWave(
      this, aRealData.Data(), aImagData.Data(), aImagData.Length(),
      aConstraints.mDisableNormalization, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  return periodicWave.forget();
}

AudioListener* AudioContext::Listener() {
  if (!mListener) {
    mListener = new AudioListener(this);
  }
  return mListener;
}

bool AudioContext::IsRunning() const {
  return mAudioContextState == AudioContextState::Running;
}

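// Implements both the callback and the promise forms of decodeAudioData();
// roughly, from script: ctx.decodeAudioData(arrayBuffer).then(buffer => ...).
// The incoming ArrayBuffer must not be shared or detached; its contents are
// stolen (detaching the buffer), the media type is sniffed, and the actual
// decoding happens off the main thread via AsyncDecodeWebAudio. The returned
// promise is settled later by the WebAudioDecodeJob that mDecodeJobs keeps
// alive.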
already_AddRefed<Promise> AudioContext::DecodeAudioData(
    const ArrayBuffer& aBuffer,
    const Optional<OwningNonNull<DecodeSuccessCallback> >& aSuccessCallback,
    const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback,
    ErrorResult& aRv) {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  RefPtr<Promise> promise;
  AutoJSAPI jsapi;
  jsapi.Init();
  JSContext* cx = jsapi.cx();
  JSAutoCompartment ac(cx, aBuffer.Obj());

  promise = Promise::Create(parentObject, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }

  aBuffer.ComputeLengthAndData();

  if (aBuffer.IsShared()) {
    // Throw if the object is mapping shared memory (must opt in).
    aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_SHARED>(
        NS_LITERAL_STRING("Argument of AudioContext.decodeAudioData"));
    return nullptr;
  }

  if (!aBuffer.Data()) {
    // Throw if the buffer is detached
    aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_DETACHED>(
        NS_LITERAL_STRING("Argument of AudioContext.decodeAudioData"));
    return nullptr;
  }

  // Detach the array buffer
  size_t length = aBuffer.Length();
  JS::RootedObject obj(cx, aBuffer.Obj());

  uint8_t* data = static_cast<uint8_t*>(JS_StealArrayBufferContents(cx, obj));

  // Sniff the content of the media.
  // Failed type sniffing will be handled by AsyncDecodeWebAudio.
  nsAutoCString contentType;
  NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType);

  RefPtr<DecodeErrorCallback> failureCallback;
  RefPtr<DecodeSuccessCallback> successCallback;
  if (aFailureCallback.WasPassed()) {
    failureCallback = &aFailureCallback.Value();
  }
  if (aSuccessCallback.WasPassed()) {
    successCallback = &aSuccessCallback.Value();
  }
  UniquePtr<WebAudioDecodeJob> job(
      new WebAudioDecodeJob(this, promise, successCallback, failureCallback));
  AsyncDecodeWebAudio(contentType.get(), data, length, *job);
  // Transfer the ownership to mDecodeJobs
  mDecodeJobs.AppendElement(Move(job));

  return promise.forget();
}

void AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) {
  // Since UniquePtr doesn't provide an operator== which allows you to compare
  // against raw pointers, we need to iterate manually.
  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
    if (mDecodeJobs[i].get() == aDecodeJob) {
      mDecodeJobs.RemoveElementAt(i);
      break;
    }
  }
}

void AudioContext::RegisterActiveNode(AudioNode* aNode) {
  if (!mIsShutDown) {
    mActiveNodes.PutEntry(aNode);
  }
}

void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
  mActiveNodes.RemoveEntry(aNode);
}

void AudioContext::UnregisterAudioBufferSourceNode(
    AudioBufferSourceNode* aNode) {
  UpdatePannerSource();
}

void AudioContext::UnregisterPannerNode(PannerNode* aNode) {
  mPannerNodes.RemoveEntry(aNode);
  if (mListener) {
    mListener->UnregisterPannerNode(aNode);
  }
}

void AudioContext::UpdatePannerSource() {
  for (auto iter = mPannerNodes.Iter(); !iter.Done(); iter.Next()) {
    iter.Get()->GetKey()->FindConnectedSources();
  }
}

uint32_t AudioContext::MaxChannelCount() const {
  return std::min<uint32_t>(
      WebAudioUtils::MaxChannelCount,
      mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
}

uint32_t AudioContext::ActiveNodeCount() const { return mActiveNodes.Count(); }

MediaStreamGraph* AudioContext::Graph() const {
  return Destination()->Stream()->Graph();
}

MediaStream* AudioContext::DestinationStream() const {
  if (Destination()) {
    return Destination()->Stream();
  }
  return nullptr;
}

double AudioContext::CurrentTime() {
  MediaStream* stream = Destination()->Stream();

  double rawTime = stream->StreamTimeToSeconds(stream->GetCurrentTime());

  // CurrentTime increments in steps of 128/sampleRate. If the Timer Precision
  // Reduction is smaller than this step, the jittered time could always be
  // snapped back to the underlying step anyway, so we can simply return the
  // un-reduced time and avoid breaking tests. To compare the two quantities
  // we convert both to a common unit; we choose milliseconds.
  if ((128 / mSampleRate) * 1000.0 > nsRFPService::TimerResolution() / 1000.0) {
    return rawTime;
  }

  // The value of a MediaStream's CurrentTime will always advance forward; it
  // will never reset (even if one rewinds a video.) Therefore we can use a
  // single Random Seed initialized at the same time as the object.
  return nsRFPService::ReduceTimePrecisionAsSecs(rawTime,
                                                 GetRandomTimelineSeed());
}

void AudioContext::DisconnectFromOwner() {
  mIsDisconnecting = true;
  Shutdown();
  DOMEventTargetHelper::DisconnectFromOwner();
}

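// Tears the context down when the owning document/global goes away or when we
// otherwise stop for good: pending state-change promises are rejected (unless
// the global is already being disconnected), active nodes are released, and
// offline contexts get their MediaStreamGraph destroyed.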
void AudioContext::Shutdown() {
  mIsShutDown = true;

  // We don't want to touch promises if the global is going away soon.
  if (!mIsDisconnecting) {
    if (!mIsOffline) {
      RefPtr<Promise> ignored = Close(IgnoreErrors());
    }

    for (auto p : mPromiseGripArray) {
      p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
    }

    mPromiseGripArray.Clear();
  }

  // Release our references to the active nodes. Active AudioNodes don't
  // unregister themselves in their destructors; by the time a destructor
  // runs, the node has already been unregistered.
  mActiveNodes.Clear();

  // For offline contexts, we can destroy the MediaStreamGraph at this point.
  if (mIsOffline && mDestination) {
    mDestination->OfflineShutdown();
  }
}

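// StateChangeTask carries a pending state transition from the graph back to
// the main thread, where AudioContext::OnStateChanged() resolves the promise
// (passed as an opaque void*) and updates mAudioContextState. One constructor
// is used on the main thread with the AudioContext itself; the other is used
// on the graph thread with an AudioNodeStream, from which the context is
// looked up later in Run().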
StateChangeTask::StateChangeTask(AudioContext* aAudioContext, void* aPromise,
                                 AudioContextState aNewState)
    : Runnable("dom::StateChangeTask"),
      mAudioContext(aAudioContext),
      mPromise(aPromise),
      mAudioNodeStream(nullptr),
      mNewState(aNewState) {
  MOZ_ASSERT(NS_IsMainThread(),
             "This constructor should be used from the main thread.");
}

StateChangeTask::StateChangeTask(AudioNodeStream* aStream, void* aPromise,
                                 AudioContextState aNewState)
    : Runnable("dom::StateChangeTask"),
      mAudioContext(nullptr),
      mPromise(aPromise),
      mAudioNodeStream(aStream),
      mNewState(aNewState) {
  MOZ_ASSERT(!NS_IsMainThread(),
             "This constructor should be used from the graph thread.");
}

NS_IMETHODIMP
StateChangeTask::Run() {
  MOZ_ASSERT(NS_IsMainThread());

  if (!mAudioContext && !mAudioNodeStream) {
    return NS_OK;
  }
  if (mAudioNodeStream) {
    AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread();
    if (!node) {
      return NS_OK;
    }
    mAudioContext = node->Context();
    if (!mAudioContext) {
      return NS_OK;
    }
  }

  mAudioContext->OnStateChanged(mPromise, mNewState);
  // We can't call Release() on the AudioContext on the MSG thread, so we
  // unref it here, on the main thread.
  mAudioContext = nullptr;

  return NS_OK;
}

/* This runnable fires the "statechange" event on the main thread. */
class OnStateChangeTask final : public Runnable {
 public:
  explicit OnStateChangeTask(AudioContext* aAudioContext)
      : Runnable("dom::OnStateChangeTask"), mAudioContext(aAudioContext) {}

  NS_IMETHODIMP
  Run() override {
    nsPIDOMWindowInner* parent = mAudioContext->GetParentObject();
    if (!parent) {
      return NS_ERROR_FAILURE;
    }

    nsIDocument* doc = parent->GetExtantDoc();
    if (!doc) {
      return NS_ERROR_FAILURE;
    }

    return nsContentUtils::DispatchTrustedEvent(
        doc, static_cast<DOMEventTargetHelper*>(mAudioContext),
        NS_LITERAL_STRING("statechange"), false, false);
  }

 private:
  RefPtr<AudioContext> mAudioContext;
};

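// Dispatches aRunnable to the window's main-thread event target for
// TaskCategory::Other. If the global has already gone away, the runnable is
// simply dropped instead of being run.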
void AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
  MOZ_ASSERT(NS_IsMainThread());
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  // It can happen that this runnable took a long time to reach the main thread,
  // and the global is not valid anymore.
  if (parentObject) {
    parentObject->AbstractMainThreadFor(TaskCategory::Other)
        ->Dispatch(std::move(aRunnable));
  } else {
    RefPtr<nsIRunnable> runnable(aRunnable);
    runnable = nullptr;
  }
}

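// Called on the main thread whenever the graph reports a state transition.
// aPromise is the type-erased promise handed to ApplyAudioContextOperation by
// suspend()/resume()/close(); it is resolved here if it is still gripped by
// mPromiseGripArray. A "statechange" event is fired only when the state
// actually changes.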
void AudioContext::OnStateChanged(void* aPromise,
                                  AudioContextState aNewState) {
  MOZ_ASSERT(NS_IsMainThread());

  // This can happen if close() was called right after creating the
  // AudioContext, before the context has switched to "running".
  if (mAudioContextState == AudioContextState::Closed &&
      aNewState == AudioContextState::Running && !aPromise) {
    return;
  }

  // This can happen if this is called in reaction to a MediaStreamGraph
  // shutdown, and an AudioContext was being suspended at the same time,
  // for example if a page was being closed.
  if (mAudioContextState == AudioContextState::Closed &&
      aNewState == AudioContextState::Suspended) {
    return;
  }

#ifndef WIN32  // Bug 1170547
#ifndef XP_MACOSX
#ifdef DEBUG

  if (!((mAudioContextState == AudioContextState::Suspended &&
         aNewState == AudioContextState::Running) ||
        (mAudioContextState == AudioContextState::Running &&
         aNewState == AudioContextState::Suspended) ||
        (mAudioContextState == AudioContextState::Running &&
         aNewState == AudioContextState::Closed) ||
        (mAudioContextState == AudioContextState::Suspended &&
         aNewState == AudioContextState::Closed) ||
        (mAudioContextState == aNewState))) {
    fprintf(stderr,
            "Invalid transition: mAudioContextState: %d -> aNewState %d\n",
            static_cast<int>(mAudioContextState), static_cast<int>(aNewState));
    MOZ_ASSERT(false);
  }

#endif  // DEBUG
#endif  // XP_MACOSX
#endif  // WIN32

  MOZ_ASSERT(
      mIsOffline || aPromise || aNewState == AudioContextState::Running,
      "We should have a promise here if this is a real-time AudioContext, "
      "unless this is the first time we switch to \"running\".");

  if (aPromise) {
    Promise* promise = reinterpret_cast<Promise*>(aPromise);
    // It is possible for the promise to have been removed from
    // mPromiseGripArray if the cycle collector has severed our connections. DO
    // NOT dereference the promise pointer in that case since it may point to
    // already freed memory.
    if (mPromiseGripArray.Contains(promise)) {
      promise->MaybeResolveWithUndefined();
      DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
      MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
    }
  }

  if (mAudioContextState != aNewState) {
    RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
    Dispatch(task.forget());
  }

  mAudioContextState = aNewState;
}

nsTArray<MediaStream*> AudioContext::GetAllStreams() const {
  nsTArray<MediaStream*> streams;
  for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
    MediaStream* s = iter.Get()->GetKey()->GetStream();
    if (s) {
      streams.AppendElement(s);
    }
  }
  return streams;
}

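// suspend(), resume() and close() all follow the same pattern: create a
// promise, settle it immediately for the disallowed cases (offline contexts,
// contexts that are already closed or closing), otherwise grip it in
// mPromiseGripArray and ask the MediaStreamGraph to apply the matching
// AudioContextOperation. The promise is then resolved via OnStateChanged()
// once the graph has actually switched state. From script this is simply:
//   await ctx.suspend(); ... await ctx.resume(); ... await ctx.close();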
already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  RefPtr<Promise> promise;
  promise = Promise::Create(parentObject, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  if (mIsOffline) {
    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return promise.forget();
  }

  if (mAudioContextState == AudioContextState::Closed || mCloseCalled) {
    promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
    return promise.forget();
  }

  Destination()->Suspend();

  mPromiseGripArray.AppendElement(promise);

  nsTArray<MediaStream*> streams;
  // If mSuspendCalled is true then we already suspended all our streams,
  // so don't suspend them again (since suspend(); suspend(); resume(); should
  // cancel both suspends). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (!mSuspendCalled) {
    streams = GetAllStreams();
  }
  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
                                      streams, AudioContextOperation::Suspend,
                                      promise);

  mSuspendCalled = true;

  return promise.forget();
}

already_AddRefed<Promise> AudioContext::Resume(ErrorResult& aRv) {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  RefPtr<Promise> promise;
  promise = Promise::Create(parentObject, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }

  if (mIsOffline) {
    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return promise.forget();
  }

  if (mAudioContextState == AudioContextState::Closed || mCloseCalled) {
    promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
    return promise.forget();
  }

  Destination()->Resume();

  nsTArray<MediaStream*> streams;
  // If mSuspendCalled is false then we already resumed all our streams,
  // so don't resume them again (since suspend(); resume(); resume(); should
  // be OK). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (mSuspendCalled) {
    streams = GetAllStreams();
  }
  mPromiseGripArray.AppendElement(promise);
  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
                                      streams, AudioContextOperation::Resume,
                                      promise);

  mSuspendCalled = false;

  return promise.forget();
}

already_AddRefed<Promise> AudioContext::Close(ErrorResult& aRv) {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  RefPtr<Promise> promise;
  promise = Promise::Create(parentObject, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }

  if (mIsOffline) {
    promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return promise.forget();
  }

  if (mAudioContextState == AudioContextState::Closed) {
    promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
    return promise.forget();
  }

  if (Destination()) {
    Destination()->DestroyAudioChannelAgent();
  }

  mPromiseGripArray.AppendElement(promise);

  // This can be called when freeing a document, and the streams are dead at
  // this point, so we need extra null-checks.
  MediaStream* ds = DestinationStream();
  if (ds) {
    nsTArray<MediaStream*> streams;
    // If mSuspendCalled or mCloseCalled are true then we already suspended
    // all our streams, so don't suspend them again. But we still need to do
    // ApplyAudioContextOperation to ensure our new promise is resolved.
    if (!mSuspendCalled && !mCloseCalled) {
      streams = GetAllStreams();
    }
    Graph()->ApplyAudioContextOperation(ds->AsAudioNodeStream(), streams,
                                        AudioContextOperation::Close, promise);
  }
  mCloseCalled = true;

  return promise.forget();
}

void AudioContext::RegisterNode(AudioNode* aNode) {
  MOZ_ASSERT(!mAllNodes.Contains(aNode));
  mAllNodes.PutEntry(aNode);
}

void AudioContext::UnregisterNode(AudioNode* aNode) {
  MOZ_ASSERT(mAllNodes.Contains(aNode));
  mAllNodes.RemoveEntry(aNode);
}

JSObject* AudioContext::GetGlobalJSObject() const {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
  if (!parentObject) {
    return nullptr;
  }

  // This can also return null.
  return parentObject->GetGlobalJSObject();
}

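// OfflineAudioContext.startRendering(): kicks off the offline render exactly
// once, moves the context to "running", and returns a promise that
// mDestination settles when rendering completes (per spec it resolves with
// the rendered AudioBuffer).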
already_AddRefed<Promise> AudioContext::StartRendering(ErrorResult& aRv) {
  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());

  MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
  if (mIsStarted) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return nullptr;
  }

  mIsStarted = true;
  RefPtr<Promise> promise = Promise::Create(parentObject, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  mDestination->StartRendering(promise);

  OnStateChanged(nullptr, AudioContextState::Running);

  return promise.forget();
}

unsigned long AudioContext::Length() {
  MOZ_ASSERT(mIsOffline);
  return mDestination->Length();
}

void AudioContext::Mute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Mute();
  }
}

void AudioContext::Unmute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Unmute();
  }
}

size_t AudioContext::SizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  // AudioNodes are measured separately (in CollectReports) because the
  // AudioContext does not keep strong references to the AudioNodes it
  // creates, so we cannot traverse and measure them from here.

  size_t amount = aMallocSizeOf(this);
  if (mListener) {
    amount += mListener->SizeOfIncludingThis(aMallocSizeOf);
  }
  amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
    amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf);
  }
  amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mPannerNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

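// nsIMemoryReporter implementation. Each AudioNode is reported under
// "explicit/webaudio/audio-node/<node type>/dom-nodes", and the context
// itself (plus its listener, decode jobs and hashtables) under
// "explicit/webaudio/audiocontext".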
NS_IMETHODIMP
AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) {
  const nsLiteralCString nodeDescription(
      "Memory used by AudioNode DOM objects (Web Audio).");
  for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
    AudioNode* node = iter.Get()->GetKey();
    int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
    nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes",
                                node->NodeType());
    aHandleReport->Callback(EmptyCString(), domNodePath, KIND_HEAP, UNITS_BYTES,
                            amount, nodeDescription, aData);
  }

  int64_t amount = SizeOfIncludingThis(MallocSizeOf);
  MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                     amount,
                     "Memory used by AudioContext objects (Web Audio).");

  return NS_OK;
}

BasicWaveFormCache* AudioContext::GetBasicWaveFormCache() {
  MOZ_ASSERT(NS_IsMainThread());
  if (!mBasicWaveFormCache) {
    mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
  }
  return mBasicWaveFormCache;
}

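// BasicWaveFormCache lazily builds and caches the shared sawtooth, square and
// triangle PeriodicWaves used by OscillatorNodes at this context's sample
// rate. The cache is created on the main thread but, per the assertions
// below, is only queried from the graph thread.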
BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate)
    : mSampleRate(aSampleRate) {
  MOZ_ASSERT(NS_IsMainThread());
}
BasicWaveFormCache::~BasicWaveFormCache() {}

WebCore::PeriodicWave* BasicWaveFormCache::GetBasicWaveForm(
    OscillatorType aType) {
  MOZ_ASSERT(!NS_IsMainThread());
  if (aType == OscillatorType::Sawtooth) {
    if (!mSawtooth) {
      mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate);
    }
    return mSawtooth;
  } else if (aType == OscillatorType::Square) {
    if (!mSquare) {
      mSquare = WebCore::PeriodicWave::createSquare(mSampleRate);
    }
    return mSquare;
  } else if (aType == OscillatorType::Triangle) {
    if (!mTriangle) {
      mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate);
    }
    return mTriangle;
  } else {
    MOZ_ASSERT(false, "Not reached");
    return nullptr;
  }
}

}  // namespace dom
}  // namespace mozilla