extern crate js_sys;
extern crate wasm_bindgen;
extern crate web_sys;

use self::js_sys::eval;
use self::wasm_bindgen::prelude::*;
use self::wasm_bindgen::JsCast;
use self::web_sys::{AudioContext, AudioContextOptions};
use crate::{
    BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError,
    DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
    PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize,
    SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError,
};
use std::ops::DerefMut;
use std::sync::{Arc, Mutex, RwLock};
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};

/// Content is false if the iterator is empty.
pub struct Devices(bool);

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;

pub struct Host;

pub struct Stream {
    ctx: Arc<AudioContext>,
    on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>>,
    config: StreamConfig,
    buffer_size_frames: usize,
}

pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;

const MIN_CHANNELS: u16 = 1;
const MAX_CHANNELS: u16 = 32;
const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000);
const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000);
const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100);
const MIN_BUFFER_SIZE: u32 = 1;
const MAX_BUFFER_SIZE: u32 = u32::MAX;
const DEFAULT_BUFFER_SIZE: usize = 2048;
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
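// With the defaults above, one buffer spans DEFAULT_BUFFER_SIZE / DEFAULT_SAMPLE_RATE
// = 2048 / 44_100 ≈ 46 ms of audio.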

impl Host {
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}

impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    fn is_available() -> bool {
        // Assume this host is always available on webaudio.
        true
    }

    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }

    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }

    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}

impl Devices {
    fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}

impl Device {
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok("Default Device".to_owned())
    }

    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        // TODO
        Ok(Vec::new().into_iter())
    }

    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        let buffer_size = SupportedBufferSize::Range {
            min: MIN_BUFFER_SIZE,
            max: MAX_BUFFER_SIZE,
        };
        let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS)
            .map(|channels| SupportedStreamConfigRange {
                channels,
                min_sample_rate: MIN_SAMPLE_RATE,
                max_sample_rate: MAX_SAMPLE_RATE,
                buffer_size: buffer_size.clone(),
                sample_format: SUPPORTED_SAMPLE_FORMAT,
            })
            .collect();
        Ok(configs.into_iter())
    }

    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        // TODO
        Err(DefaultStreamConfigError::StreamTypeNotSupported)
    }

    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        const EXPECT: &str = "expected at least one valid webaudio stream config";
        let config = self
            .supported_output_configs()
            .expect(EXPECT)
            .max_by(|a, b| a.cmp_default_heuristics(b))
            .unwrap()
            .with_sample_rate(DEFAULT_SAMPLE_RATE);

        Ok(config)
    }
}

impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }

    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }

    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }

    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }

    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }

    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // TODO
        Err(BuildStreamError::StreamConfigNotSupported)
    }

    /// Create an output stream.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        _error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        if !valid_config(config, sample_format) {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }

        let n_channels = config.channels as usize;

        let buffer_size_frames = match config.buffer_size {
            BufferSize::Fixed(v) => {
                if v == 0 {
                    return Err(BuildStreamError::StreamConfigNotSupported);
                } else {
                    v as usize
                }
            }
            BufferSize::Default => DEFAULT_BUFFER_SIZE,
        };
        // A frame holds one sample per channel, so the interleaved buffer is
        // `frames * channels` samples long.
        let buffer_size_samples = buffer_size_frames * n_channels;
        let buffer_time_step_secs = buffer_time_step_secs(buffer_size_frames, config.sample_rate);

        let data_callback = Arc::new(Mutex::new(Box::new(data_callback)));

        // Create the WebAudio stream.
        let mut stream_opts = AudioContextOptions::new();
        stream_opts.sample_rate(config.sample_rate.0 as f32);
        let ctx = Arc::new(
            AudioContext::new_with_context_options(&stream_opts).map_err(
                |err| -> BuildStreamError {
                    let description = format!("{:?}", err);
                    let err = BackendSpecificError { description };
                    err.into()
                },
            )?,
        );

        // A container for managing the lifecycle of the audio callbacks.
        let mut on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>> = Vec::new();

        // A cursor keeping track of the current time at which new frames should be scheduled.
        let time = Arc::new(RwLock::new(0f64));

        // Create a set of closures / callbacks to continuously fetch and schedule sample
        // playback. We start with two workers, i.e. a front and a back buffer, so that
        // audio frames can be fetched in the background while the other buffer plays.
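        // Each worker reschedules itself from its own `onended` callback (set further
        // below), so the two buffers alternate: while one is playing, the other is
        // being refilled and queued.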
        for _i in 0..2 {
            let data_callback_handle = data_callback.clone();
            let ctx_handle = ctx.clone();
            let time_handle = time.clone();

            // A set of temporary buffers to be used for intermediate sample transformation steps.
            let mut temporary_buffer = vec![0f32; buffer_size_samples];
            let mut temporary_channel_buffer = vec![0f32; buffer_size_frames];

            // Create a webaudio buffer which will be reused to avoid allocations.
            let ctx_buffer = ctx
                .create_buffer(
                    config.channels as u32,
                    buffer_size_frames as u32,
                    config.sample_rate.0 as f32,
                )
                .map_err(|err| -> BuildStreamError {
                    let description = format!("{:?}", err);
                    let err = BackendSpecificError { description };
                    err.into()
                })?;

            // A self reference to this closure for passing to future audio event calls.
            let on_ended_closure: Arc<RwLock<Option<Closure<dyn FnMut()>>>> =
                Arc::new(RwLock::new(None));
            let on_ended_closure_handle = on_ended_closure.clone();

            on_ended_closure
                .write()
                .unwrap()
                .replace(Closure::wrap(Box::new(move || {
                    let now = ctx_handle.current_time();
                    let time_at_start_of_buffer = {
                        let time_at_start_of_buffer = time_handle
                            .read()
                            .expect("Unable to get a read lock on the time cursor");
                        // Synchronise the first buffer as necessary (e.g. keep the time value
                        // referenced to the context clock).
                        if *time_at_start_of_buffer > 0.001 {
                            *time_at_start_of_buffer
                        } else {
                            // Allow 25 ms to fetch the first sample data; increase this
                            // to avoid initial underruns.
                            now + 0.025
                        }
                    };

                    // Populate the sample data into an interleaved temporary buffer.
                    {
                        let len = temporary_buffer.len();
                        let data = temporary_buffer.as_mut_ptr() as *mut ();
                        let mut data = unsafe { Data::from_parts(data, len, sample_format) };
                        let mut data_callback = data_callback_handle.lock().unwrap();
                        let callback = crate::StreamInstant::from_secs_f64(now);
                        let playback = crate::StreamInstant::from_secs_f64(time_at_start_of_buffer);
                        let timestamp = crate::OutputStreamTimestamp { callback, playback };
                        let info = OutputCallbackInfo { timestamp };
                        (data_callback.deref_mut())(&mut data, &info);
                    }

                    // Deinterleave the sample data and copy it into the audio context buffer.
                    // We do not reference the audio context buffer directly (e.g. via
                    // `getChannelData`), as wasm-bindgen only gives us a copy, not a direct
                    // reference.
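                    // For example, with two channels the interleaved data
                    // [L0, R0, L1, R1, ...] is split into [L0, L1, ...] for channel 0
                    // and [R0, R1, ...] for channel 1.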
                    for channel in 0..n_channels {
                        for i in 0..buffer_size_frames {
                            temporary_channel_buffer[i] =
                                temporary_buffer[n_channels * i + channel];
                        }
                        ctx_buffer
                            .copy_to_channel(&mut temporary_channel_buffer, channel as i32)
                            .expect("Unable to write sample data into the audio context buffer");
                    }

                    // Create an AudioBufferSourceNode and schedule it to play back the reused
                    // buffer in the future.
                    let source = ctx_handle
                        .create_buffer_source()
                        .expect("Unable to create a webaudio buffer source");
                    source.set_buffer(Some(&ctx_buffer));
                    source
                        .connect_with_audio_node(&ctx_handle.destination())
                        .expect(
                            "Unable to connect the web audio buffer source to the context destination",
                        );
                    source.set_onended(Some(
                        on_ended_closure_handle
                            .read()
                            .unwrap()
                            .as_ref()
                            .unwrap()
                            .as_ref()
                            .unchecked_ref(),
                    ));

                    source
                        .start_with_when(time_at_start_of_buffer)
                        .expect("Unable to start the webaudio buffer source");

                    // Keep track of when the next buffer worth of samples should be played.
                    *time_handle.write().unwrap() = time_at_start_of_buffer + buffer_time_step_secs;
                }) as Box<dyn FnMut()>));

            on_ended_closures.push(on_ended_closure);
        }

        Ok(Stream {
            ctx,
            on_ended_closures,
            config: config.clone(),
            buffer_size_frames,
        })
    }
}

impl StreamTrait for Stream {
    fn play(&self) -> Result<(), PlayStreamError> {
        let window = web_sys::window().unwrap();
        match self.ctx.resume() {
            Ok(_) => {
                // Begin webaudio playback, initially scheduling the closures to fire on a timeout
                // event.
                let mut offset_ms = 10;
                let time_step_secs =
                    buffer_time_step_secs(self.buffer_size_frames, self.config.sample_rate);
                let time_step_ms = (time_step_secs * 1_000.0) as i32;
                for on_ended_closure in self.on_ended_closures.iter() {
                    window
                        .set_timeout_with_callback_and_timeout_and_arguments_0(
                            on_ended_closure
                                .read()
                                .unwrap()
                                .as_ref()
                                .unwrap()
                                .as_ref()
                                .unchecked_ref(),
                            offset_ms,
                        )
                        .unwrap();
                    offset_ms += time_step_ms;
                }
                Ok(())
            }
            Err(err) => {
                let description = format!("{:?}", err);
                let err = BackendSpecificError { description };
                Err(err.into())
            }
        }
    }

    fn pause(&self) -> Result<(), PauseStreamError> {
        match self.ctx.suspend() {
            Ok(_) => Ok(()),
            Err(err) => {
                let description = format!("{:?}", err);
                let err = BackendSpecificError { description };
                Err(err.into())
            }
        }
    }
}
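
// Illustrative caller-side sketch (hypothetical usage, browser-only since a
// WebAudio context is required), driving a stream via the `StreamTrait` API:
//
//     stream.play()?;   // resume the AudioContext and schedule the callbacks
//     stream.pause()?;  // suspend the AudioContext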

impl Drop for Stream {
    fn drop(&mut self) {
        let _ = self.ctx.close();
    }
}

impl Default for Devices {
    fn default() -> Devices {
        // We produce an empty iterator if the WebAudio API isn't available.
        Devices(is_webaudio_available())
    }
}

impl Iterator for Devices {
    type Item = Device;
    #[inline]
    fn next(&mut self) -> Option<Device> {
        if self.0 {
            self.0 = false;
            Some(Device)
        } else {
            None
        }
    }
}

#[inline]
fn default_input_device() -> Option<Device> {
    // TODO
    None
}

#[inline]
fn default_output_device() -> Option<Device> {
    if is_webaudio_available() {
        Some(Device)
    } else {
        None
    }
}

// Detects whether the `AudioContext` global variable is available.
fn is_webaudio_available() -> bool {
    if let Ok(audio_context_is_defined) = eval("typeof AudioContext !== 'undefined'") {
        audio_context_is_defined.as_bool().unwrap()
    } else {
        false
    }
}

// Whether or not the given stream configuration is valid for building a stream.
fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool {
    conf.channels <= MAX_CHANNELS
        && conf.channels >= MIN_CHANNELS
        && conf.sample_rate <= MAX_SAMPLE_RATE
        && conf.sample_rate >= MIN_SAMPLE_RATE
        && sample_format == SUPPORTED_SAMPLE_FORMAT
}

fn buffer_time_step_secs(buffer_size_frames: usize, sample_rate: SampleRate) -> f64 {
    buffer_size_frames as f64 / sample_rate.0 as f64
}
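
// A minimal sanity-check sketch of the two helpers above. This test module is
// illustrative only: it assumes the `StreamConfig`, `BufferSize`, and
// `SampleFormat` types imported at the top of this file, and it runs on the
// host target rather than in the browser.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn buffer_time_step_is_frames_over_sample_rate() {
        // 2048 frames at 44_100 Hz span 2048 / 44_100 ≈ 0.0464 seconds.
        let secs = buffer_time_step_secs(DEFAULT_BUFFER_SIZE, DEFAULT_SAMPLE_RATE);
        assert!((secs - 2048.0 / 44_100.0).abs() < 1e-12);
    }

    #[test]
    fn only_f32_configs_within_bounds_are_valid() {
        let config = StreamConfig {
            channels: 2,
            sample_rate: DEFAULT_SAMPLE_RATE,
            buffer_size: BufferSize::Default,
        };
        assert!(valid_config(&config, SampleFormat::F32));
        // This backend only supports `f32` output.
        assert!(!valid_config(&config, SampleFormat::I16));
    }
}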