// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/modules/webaudio/audio_context.h"

#include "base/metrics/histogram_functions.h"
#include "build/build_config.h"
#include "services/metrics/public/cpp/ukm_builders.h"
#include "services/metrics/public/cpp/ukm_recorder.h"
#include "third_party/blink/public/common/browser_interface_broker_proxy.h"
#include "third_party/blink/public/platform/web_audio_latency_hint.h"
#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_context_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_timestamp.h"
#include "third_party/blink/renderer/core/dom/dom_exception.h"
#include "third_party/blink/renderer/core/frame/local_dom_window.h"
#include "third_party/blink/renderer/core/frame/local_frame.h"
#include "third_party/blink/renderer/core/html/media/html_media_element.h"
#include "third_party/blink/renderer/core/inspector/console_message.h"
#include "third_party/blink/renderer/core/probe/core_probes.h"
#include "third_party/blink/renderer/core/timing/dom_window_performance.h"
#include "third_party/blink/renderer/core/timing/window_performance.h"
#include "third_party/blink/renderer/modules/mediastream/media_stream.h"
#include "third_party/blink/renderer/modules/webaudio/audio_listener.h"
#include "third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h"
#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h"
#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h"
#include "third_party/blink/renderer/modules/webaudio/realtime_audio_destination_node.h"
#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
#include "third_party/blink/renderer/platform/audio/vector_math.h"
#include "third_party/blink/renderer/platform/bindings/exception_messages.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/instrumentation/use_counter.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"

#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif

namespace blink {

// Number of AudioContexts still alive. It's incremented when an
// AudioContext is created and decremented when the context is closed.
static unsigned g_hardware_context_count = 0;

// A context ID, incremented for each context created; it is used to
// initialize the context's internal ID.
static unsigned g_context_id = 0;

AudioContext* AudioContext::Create(Document& document,
                                   const AudioContextOptions* context_options,
                                   ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  if (document.IsDetached()) {
    exception_state.ThrowDOMException(
        DOMExceptionCode::kNotSupportedError,
        "Cannot create AudioContext on a detached document.");
    return nullptr;
  }

  document.domWindow()->CountUseOnlyInCrossOriginIframe(
      WebFeature::kAudioContextCrossOriginIframe);

  WebAudioLatencyHint latency_hint(WebAudioLatencyHint::kCategoryInteractive);
  if (context_options->latencyHint().IsAudioContextLatencyCategory()) {
    latency_hint = WebAudioLatencyHint(
        context_options->latencyHint().GetAsAudioContextLatencyCategory());
  } else if (context_options->latencyHint().IsDouble()) {
    // This should be the requested output latency in seconds, without taking
    // into account double buffering (same as baseLatency).
    latency_hint =
        WebAudioLatencyHint(context_options->latencyHint().GetAsDouble());
  }

  base::Optional<float> sample_rate;
  if (context_options->hasSampleRate()) {
    sample_rate = context_options->sampleRate();
  }

  // Validate options before trying to construct the actual context.
  if (sample_rate.has_value() &&
      !audio_utilities::IsValidAudioBufferSampleRate(sample_rate.value())) {
    exception_state.ThrowDOMException(
        DOMExceptionCode::kNotSupportedError,
        ExceptionMessages::IndexOutsideRange(
            "hardware sample rate", sample_rate.value(),
            audio_utilities::MinAudioBufferSampleRate(),
            ExceptionMessages::kInclusiveBound,
            audio_utilities::MaxAudioBufferSampleRate(),
            ExceptionMessages::kInclusiveBound));
    return nullptr;
  }

  AudioContext* audio_context =
      MakeGarbageCollected<AudioContext>(document, latency_hint, sample_rate);
  ++g_hardware_context_count;
  audio_context->UpdateStateIfNeeded();

  // This starts the audio thread. The destination node's
  // provideInput() method will now be called repeatedly to render
  // audio. Each time provideInput() is called, a portion of the
  // audio stream is rendered. Let's call this time period a "render
  // quantum". NOTE: for now AudioContext does not need an explicit
  // startRendering() call from JavaScript. We may want to consider
  // requiring it for symmetry with OfflineAudioContext.
  audio_context->MaybeAllowAutoplayWithUnlockType(
      AutoplayUnlockType::kContextConstructor);
  if (audio_context->IsAllowedToStart()) {
    audio_context->StartRendering();
    audio_context->SetContextState(kRunning);
  }
#if DEBUG_AUDIONODE_REFERENCES
  fprintf(stderr, "[%16p]: AudioContext::AudioContext(): %u #%u\n",
          audio_context, audio_context->context_id_, g_hardware_context_count);
#endif

  base::UmaHistogramSparse("WebAudio.AudioContext.MaxChannelsAvailable",
                           audio_context->destination()->maxChannelCount());

  probe::DidCreateAudioContext(&document);

  return audio_context;
}

AudioContext::AudioContext(Document& document,
                           const WebAudioLatencyHint& latency_hint,
                           base::Optional<float> sample_rate)
    : BaseAudioContext(&document, kRealtimeContext),
      context_id_(g_context_id++),
      audio_context_manager_(document.GetExecutionContext()),
      keep_alive_(PERSISTENT_FROM_HERE, this) {
  destination_node_ =
      RealtimeAudioDestinationNode::Create(this, latency_hint, sample_rate);

  switch (GetAutoplayPolicy()) {
    case AutoplayPolicy::Type::kNoUserGestureRequired:
      break;
    case AutoplayPolicy::Type::kUserGestureRequired:
      // The kUserGestureRequired policy only applies to cross-origin iframes
      // for Web Audio.
      if (document.GetFrame() &&
          document.GetFrame()->IsCrossOriginToMainFrame()) {
        autoplay_status_ = AutoplayStatus::kFailed;
        user_gesture_required_ = true;
      }
      break;
    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
      autoplay_status_ = AutoplayStatus::kFailed;
      user_gesture_required_ = true;
      break;
  }

  Initialize();

  // Compute the base latency now and cache the value since it doesn't change
  // once the context is constructed. We need the destination to be
  // initialized, so we have to compute it here.
  //
  // TODO(hongchan): Due to the incompatible constructor between
  // AudioDestinationNode and RealtimeAudioDestinationNode, casting directly
  // from |destination()| is impossible. This is a temporary workaround until
  // the refactoring is completed.
  RealtimeAudioDestinationHandler& destination_handler =
      static_cast<RealtimeAudioDestinationHandler&>(
          destination()->GetAudioDestinationHandler());
  base_latency_ = destination_handler.GetFramesPerBuffer() /
                  static_cast<double>(sampleRate());
}

void AudioContext::Uninitialize() {
  DCHECK(IsMainThread());
  DCHECK_NE(g_hardware_context_count, 0u);
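  // Balance the increment done in Create().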
  --g_hardware_context_count;
  StopRendering();
  DidClose();
  RecordAutoplayMetrics();
  BaseAudioContext::Uninitialize();
}

AudioContext::~AudioContext() {
  // TODO(crbug.com/945379) Disable this DCHECK for now. It's not terrible if
  // the autoplay metrics aren't recorded in some odd situations. haraken@ said
  // that we shouldn't get here without also calling |Uninitialize()|, but it
  // can happen. Until that is fixed, disable this DCHECK.

  // DCHECK(!autoplay_status_.has_value());
#if DEBUG_AUDIONODE_REFERENCES
  fprintf(stderr, "[%16p]: AudioContext::~AudioContext(): %u\n", this,
          context_id_);
#endif
}

void AudioContext::Trace(Visitor* visitor) const {
  visitor->Trace(close_resolver_);
  visitor->Trace(audio_context_manager_);
  BaseAudioContext::Trace(visitor);
}

ScriptPromise AudioContext::suspendContext(ScriptState* script_state) {
  DCHECK(IsMainThread());

  auto* resolver = MakeGarbageCollected<ScriptPromiseResolver>(script_state);
  ScriptPromise promise = resolver->Promise();

  if (ContextState() == kClosed) {
    resolver->Reject(MakeGarbageCollected<DOMException>(
        DOMExceptionCode::kInvalidStateError,
        "Cannot suspend a context that has been closed"));
  } else {
    suspended_by_user_ = true;

    // Stop rendering now.
    if (destination())
      SuspendRendering();

    // Since we don't have any way of knowing when the hardware actually stops,
    // we'll just resolve the promise now.
    resolver->Resolve();

    // Probe reports the suspension only when the promise is resolved.
    probe::DidSuspendAudioContext(GetDocument());
  }

  return promise;
}

ScriptPromise AudioContext::resumeContext(ScriptState* script_state,
                                          ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  if (IsContextClosed()) {
    exception_state.ThrowDOMException(DOMExceptionCode::kInvalidAccessError,
                                      "cannot resume a closed AudioContext");
    return ScriptPromise();
  }

  auto* resolver = MakeGarbageCollected<ScriptPromiseResolver>(script_state);
  ScriptPromise promise = resolver->Promise();

  // If we're already running, just resolve; nothing else needs to be done.
  if (ContextState() == kRunning) {
    resolver->Resolve();
    return promise;
  }

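  // An explicit resume() call clears any user-initiated suspension.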
  suspended_by_user_ = false;

  // Restart the destination node to pull on the audio graph.
  if (destination()) {
    MaybeAllowAutoplayWithUnlockType(AutoplayUnlockType::kContextResume);
    if (IsAllowedToStart()) {
      // Do not set the state to running here. We wait for the
      // destination to start to set the state.
      StartRendering();

      // Probe reports only when the user gesture allows the audio rendering.
      probe::DidResumeAudioContext(GetDocument());
    }
  }

  // Save the resolver, which will get resolved when the destination node
  // starts pulling on the graph again.
  {
    GraphAutoLocker locker(this);
    resume_resolvers_.push_back(resolver);
  }

  return promise;
}

bool AudioContext::IsPullingAudioGraph() const {
  DCHECK(IsMainThread());

  if (!destination())
    return false;

  RealtimeAudioDestinationHandler& destination_handler =
      static_cast<RealtimeAudioDestinationHandler&>(
          destination()->GetAudioDestinationHandler());

  // The realtime context is pulling on the audio graph if the realtime
  // destination allows it.
  return destination_handler.IsPullingAudioGraphAllowed();
}

AudioTimestamp* AudioContext::getOutputTimestamp(
    ScriptState* script_state) const {
  AudioTimestamp* result = AudioTimestamp::Create();

  DCHECK(IsMainThread());
  LocalDOMWindow* window = LocalDOMWindow::From(script_state);
  if (!window)
    return result;

  if (!destination()) {
    result->setContextTime(0.0);
    result->setPerformanceTime(0.0);
    return result;
  }

  WindowPerformance* performance = DOMWindowPerformance::performance(*window);
  DCHECK(performance);

  AudioIOPosition position = OutputPosition();

  // The timestamp of what is currently being played (contextTime) cannot be
  // later than what is being rendered (currentTime).
  if (position.position > currentTime()) {
    position.position = currentTime();
  }

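  // Convert the monotonic-clock timestamp of the output position into the
  // window's Performance timeline; negative values (which may occur before
  // any audio has been rendered) are clamped to zero.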
  double performance_time = performance->MonotonicTimeToDOMHighResTimeStamp(
      base::TimeTicks() + base::TimeDelta::FromSecondsD(position.timestamp));
  if (performance_time < 0.0)
    performance_time = 0.0;

  result->setContextTime(position.position);
  result->setPerformanceTime(performance_time);
  return result;
}

ScriptPromise AudioContext::closeContext(ScriptState* script_state,
                                         ExceptionState& exception_state) {
  if (IsContextClosed()) {
    // We've already closed the context previously, but it hasn't yet been
    // resolved, so just throw a DOM exception to trigger a promise rejection
    // and return an empty promise.
    exception_state.ThrowDOMException(
        DOMExceptionCode::kInvalidStateError,
        "Cannot close a context that is being closed or has already been "
        "closed.");
    return ScriptPromise();
  }

  close_resolver_ = MakeGarbageCollected<ScriptPromiseResolver>(script_state);
  ScriptPromise promise = close_resolver_->Promise();

  // Stop rendering, but don't release the resources here.
  StopRendering();

  // The promise from closing the context resolves immediately after this
  // function returns.
  DidClose();

  probe::DidCloseAudioContext(GetDocument());

  return promise;
}

void AudioContext::DidClose() {
  SetContextState(kClosed);

  if (close_resolver_)
    close_resolver_->Resolve();
}

bool AudioContext::IsContextClosed() const {
  return close_resolver_ || BaseAudioContext::IsContextClosed();
}

void AudioContext::StartRendering() {
  DCHECK(IsMainThread());

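  // A rendering context must stay alive even when there are no remaining
  // JavaScript references to it, so re-establish the self-keep-alive here;
  // it is cleared again in StopRendering().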
  if (!keep_alive_)
    keep_alive_ = this;
  BaseAudioContext::StartRendering();
}

void AudioContext::StopRendering() {
  DCHECK(IsMainThread());
  DCHECK(destination());

  // It is okay to perform the following on a suspended AudioContext because
  // this method gets called from ExecutionContext::ContextDestroyed(), meaning
  // the AudioContext is already unreachable from user code.
  if (ContextState() != kClosed) {
    destination()->GetAudioDestinationHandler().StopRendering();
    SetContextState(kClosed);
    GetDeferredTaskHandler().ClearHandlersToBeDeleted();
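    // A closed context no longer needs to keep itself alive.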
    keep_alive_.Clear();
  }
}

void AudioContext::SuspendRendering() {
  DCHECK(IsMainThread());
  DCHECK(destination());

  if (ContextState() == kRunning) {
    destination()->GetAudioDestinationHandler().StopRendering();
    SetContextState(kSuspended);
  }
}

double AudioContext::baseLatency() const {
  DCHECK(IsMainThread());
  DCHECK(destination());

  return base_latency_;
}

MediaElementAudioSourceNode* AudioContext::createMediaElementSource(
    HTMLMediaElement* media_element,
    ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  return MediaElementAudioSourceNode::Create(*this, *media_element,
                                             exception_state);
}

MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(
    MediaStream* media_stream,
    ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  return MediaStreamAudioSourceNode::Create(*this, *media_stream,
                                            exception_state);
}

MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination(
    ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  // Set number of output channels to stereo by default.
  return MediaStreamAudioDestinationNode::Create(*this, 2, exception_state);
}

void AudioContext::NotifySourceNodeStart() {
  DCHECK(IsMainThread());

  source_node_started_ = true;
  if (!user_gesture_required_)
    return;

  MaybeAllowAutoplayWithUnlockType(AutoplayUnlockType::kSourceNodeStart);

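  // If the context was suspended only by the autoplay policy (not explicitly
  // by the user) and is now allowed to start, begin rendering right away.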
  if (ContextState() == AudioContextState::kSuspended && !suspended_by_user_ &&
      IsAllowedToStart()) {
    StartRendering();
    SetContextState(kRunning);
  }
}

AutoplayPolicy::Type AudioContext::GetAutoplayPolicy() const {
  Document* document = GetDocument();
  DCHECK(document);

  auto autoplay_policy =
      AutoplayPolicy::GetAutoplayPolicyForDocument(*document);

  if (autoplay_policy ==
          AutoplayPolicy::Type::kDocumentUserActivationRequired &&
      RuntimeEnabledFeatures::AutoplayIgnoresWebAudioEnabled()) {
// When ignored, the policy is different on Android compared to Desktop.
#if defined(OS_ANDROID)
    return AutoplayPolicy::Type::kUserGestureRequired;
#else
    // Force no user gesture required on desktop.
    return AutoplayPolicy::Type::kNoUserGestureRequired;
#endif
  }

  return autoplay_policy;
}

bool AudioContext::AreAutoplayRequirementsFulfilled() const {
  DCHECK(GetDocument());

  switch (GetAutoplayPolicy()) {
    case AutoplayPolicy::Type::kNoUserGestureRequired:
      return true;
    case AutoplayPolicy::Type::kUserGestureRequired:
      return LocalFrame::HasTransientUserActivation(GetDocument()->GetFrame());
    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
      return AutoplayPolicy::IsDocumentAllowedToPlay(*GetDocument());
  }

  NOTREACHED();
  return false;
}

void AudioContext::MaybeAllowAutoplayWithUnlockType(AutoplayUnlockType type) {
  if (!user_gesture_required_ || !AreAutoplayRequirementsFulfilled())
    return;

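  // Autoplay is unlocked; remember how it was unlocked for metrics reporting.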
  DCHECK(!autoplay_status_.has_value() ||
         autoplay_status_ != AutoplayStatus::kSucceeded);

  user_gesture_required_ = false;
  autoplay_status_ = AutoplayStatus::kSucceeded;

  DCHECK(!autoplay_unlock_type_.has_value());
  autoplay_unlock_type_ = type;
}

bool AudioContext::IsAllowedToStart() const {
  if (!user_gesture_required_)
    return true;

  LocalDOMWindow* window = To<LocalDOMWindow>(GetExecutionContext());
  DCHECK(window);

  switch (GetAutoplayPolicy()) {
    case AutoplayPolicy::Type::kNoUserGestureRequired:
      NOTREACHED();
      break;
    case AutoplayPolicy::Type::kUserGestureRequired:
      DCHECK(window->GetFrame());
      DCHECK(window->GetFrame()->IsCrossOriginToMainFrame());
      window->AddConsoleMessage(MakeGarbageCollected<ConsoleMessage>(
          mojom::ConsoleMessageSource::kOther,
          mojom::ConsoleMessageLevel::kWarning,
          "The AudioContext was not allowed to start. It must be resumed (or "
          "created) from a user gesture event handler. https://goo.gl/7K7WLu"));
      break;
    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
      window->AddConsoleMessage(MakeGarbageCollected<ConsoleMessage>(
          mojom::ConsoleMessageSource::kOther,
          mojom::ConsoleMessageLevel::kWarning,
          "The AudioContext was not allowed to start. It must be resumed (or "
          "created) after a user gesture on the page. https://goo.gl/7K7WLu"));
      break;
  }

  return false;
}

void AudioContext::RecordAutoplayMetrics() {
  if (!autoplay_status_.has_value() || !GetDocument())
    return;

  ukm::UkmRecorder* ukm_recorder = GetDocument()->UkmRecorder();
  DCHECK(ukm_recorder);
  ukm::builders::Media_Autoplay_AudioContext(GetDocument()->UkmSourceID())
      .SetStatus(static_cast<int>(autoplay_status_.value()))
      .SetUnlockType(autoplay_unlock_type_
                         ? static_cast<int>(autoplay_unlock_type_.value())
                         : -1)
      .SetSourceNodeStarted(source_node_started_)
      .Record(ukm_recorder);

  // Record autoplay_status_ value.
  base::UmaHistogramEnumeration("WebAudio.Autoplay", autoplay_status_.value());

  if (GetDocument()->GetFrame() &&
      GetDocument()->GetFrame()->IsCrossOriginToMainFrame()) {
    base::UmaHistogramEnumeration("WebAudio.Autoplay.CrossOrigin",
                                  autoplay_status_.value());
  }

  autoplay_status_.reset();

  // Record autoplay_unlock_type_ value.
  if (autoplay_unlock_type_.has_value()) {
    base::UmaHistogramEnumeration("WebAudio.Autoplay.UnlockType",
                                  autoplay_unlock_type_.value());

    autoplay_unlock_type_.reset();
  }
}

void AudioContext::ContextDestroyed() {
  Uninitialize();
}

bool AudioContext::HasPendingActivity() const {
  // There's activity if the context is not closed. Suspended contexts count
  // as having activity even though they are basically idle with nothing going
  // on. However, they can be resumed at any time, so we don't want contexts
  // going away prematurely.
  return (ContextState() != kClosed) && BaseAudioContext::HasPendingActivity();
}

bool AudioContext::HandlePreRenderTasks(const AudioIOPosition* output_position,
                                        const AudioCallbackMetric* metric) {
  DCHECK(IsAudioThread());

  // At the beginning of every render quantum, try to update the internal
  // rendering graph state (from main thread changes). It's OK if the tryLock()
  // fails; we'll just take slightly longer to pick up the changes.
  if (TryLock()) {
    GetDeferredTaskHandler().HandleDeferredTasks();

    ResolvePromisesForUnpause();

    // Check to see if source nodes can be stopped because the end time has
    // passed.
    HandleStoppableSourceNodes();

    // Update the dirty state of the listener.
    listener()->UpdateState();

    // Update output timestamp and metric.
    output_position_ = *output_position;
    callback_metric_ = *metric;

    unlock();
  }

  // The realtime context ignores the return value, but return true just in
  // case.
  return true;
}

void AudioContext::NotifyAudibleAudioStarted() {
  DCHECK(IsMainThread());

  EnsureAudioContextManagerService();
  if (audio_context_manager_.is_bound())
    audio_context_manager_->AudioContextAudiblePlaybackStarted(context_id_);
}

void AudioContext::HandlePostRenderTasks() {
  DCHECK(IsAudioThread());

  // Must use a tryLock() here too. Don't worry, the lock will very rarely be
  // contended and this method is called frequently. The worst that can happen
  // is that there will be some nodes which will take slightly longer than
  // usual to be deleted or removed from the render graph (in which case
  // they'll render silence).
  if (TryLock()) {
    // Take care of AudioNode tasks where the tryLock() failed previously.
    GetDeferredTaskHandler().BreakConnections();

    GetDeferredTaskHandler().HandleDeferredTasks();
    GetDeferredTaskHandler().RequestToDeleteHandlersOnMainThread();

    unlock();
  }
}

static bool IsAudible(const AudioBus* rendered_data) {
  // Compute the energy in each channel and sum the per-channel energies for
  // the total energy.
  float energy = 0;

  uint32_t data_size = rendered_data->length();
  for (uint32_t k = 0; k < rendered_data->NumberOfChannels(); ++k) {
    const float* data = rendered_data->Channel(k)->Data();
    float channel_energy;
    vector_math::Vsvesq(data, 1, &channel_energy, data_size);
    energy += channel_energy;
  }

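  // Any nonzero total energy counts as audible; no perceptual threshold is
  // applied.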
  return energy > 0;
}

void AudioContext::HandleAudibility(AudioBus* destination_bus) {
  DCHECK(IsAudioThread());

  // Detect silence (or not) for MEI (Media Engagement Index).
  bool is_audible = IsAudible(destination_bus);

  if (is_audible) {
    ++total_audible_renders_;
  }

  if (was_audible_ != is_audible) {
    // Audibility changed in this render, so report the change.
    was_audible_ = is_audible;
    if (is_audible) {
      PostCrossThreadTask(
          *task_runner_, FROM_HERE,
          CrossThreadBindOnce(&AudioContext::NotifyAudibleAudioStarted,
                              WrapCrossThreadPersistent(this)));
    } else {
      PostCrossThreadTask(
          *task_runner_, FROM_HERE,
          CrossThreadBindOnce(&AudioContext::NotifyAudibleAudioStopped,
                              WrapCrossThreadPersistent(this)));
    }
  }
}

void AudioContext::ResolvePromisesForUnpause() {
  // This runs inside the BaseAudioContext's lock when handling pre-render
  // tasks.
  DCHECK(IsAudioThread());
  AssertGraphOwner();

  // Resolve any pending promises created by resume(). Only do this if we
  // haven't already started resolving these promises. This gets called very
  // often, and it takes some time to resolve the promises in the main thread.
  if (!is_resolving_resume_promises_ && resume_resolvers_.size() > 0) {
    is_resolving_resume_promises_ = true;
    ScheduleMainThreadCleanup();
  }
}

AudioIOPosition AudioContext::OutputPosition() const {
  DCHECK(IsMainThread());
  GraphAutoLocker locker(this);
  return output_position_;
}

void AudioContext::NotifyAudibleAudioStopped() {
  DCHECK(IsMainThread());

  EnsureAudioContextManagerService();
  if (audio_context_manager_.is_bound())
    audio_context_manager_->AudioContextAudiblePlaybackStopped(context_id_);
}

void AudioContext::EnsureAudioContextManagerService() {
  if (audio_context_manager_.is_bound() || !GetDocument())
    return;

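  // Lazily connect to the browser-side AudioContextManager, which is notified
  // when audible playback starts and stops for this context.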
  GetDocument()->GetFrame()->GetBrowserInterfaceBroker().GetInterface(
      mojo::GenericPendingReceiver(
          audio_context_manager_.BindNewPipeAndPassReceiver(
              GetDocument()->GetTaskRunner(TaskType::kInternalMedia))));

  audio_context_manager_.set_disconnect_handler(
      WTF::Bind(&AudioContext::OnAudioContextManagerServiceConnectionError,
                WrapWeakPersistent(this)));
}

void AudioContext::OnAudioContextManagerServiceConnectionError() {
  audio_context_manager_.reset();
}

AudioCallbackMetric AudioContext::GetCallbackMetric() const {
  // Return a copy under the graph lock because returning a reference would
  // allow seeing the audio thread changing the struct values. This method
  // gets called once per second and the size of the struct is small, so
  // creating a copy is acceptable here.
  GraphAutoLocker locker(this);
  return callback_metric_;
}

}  // namespace blink