1 use std::mem;
2 use std::os::raw::c_void;
3 use std::slice::from_raw_parts;
4 use stdweb;
5 use stdweb::unstable::TryInto;
6 use stdweb::web::set_timeout;
7 use stdweb::web::TypedArray;
8 use stdweb::Reference;
9 
10 use crate::{
11     BufferSize, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
12     InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat,
13     SampleRate, StreamConfig, StreamError, SupportedBufferSize, SupportedStreamConfig,
14     SupportedStreamConfigRange, SupportedStreamConfigsError,
15 };
16 use traits::{DeviceTrait, HostTrait, StreamTrait};
17 
18 // The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
19 // Creating a stream creates a new `AudioContext`. Destroying a stream destroys it. Creation of a
20 // `Host` instance initializes the `stdweb` context.
21 
/// The default emscripten host type.
///
/// Constructing it (via `Host::new`) initializes the `stdweb` runtime.
#[derive(Debug)]
pub struct Host;

/// Iterator over available devices.
///
/// Content is false if the iterator is empty — i.e. the single default device
/// has already been yielded, or the WebAudio API is unavailable.
pub struct Devices(bool);

/// The single default device exposed by this backend.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;

pub struct Stream {
    // A reference to an `AudioContext` object.
    audio_ctxt_ref: Reference,
}

// Index within the `streams` array of the events loop.
// NOTE(review): no `streams` array exists in this file — this comment looks
// stale and `StreamId` appears unused here; confirm against the rest of the crate.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StreamId(usize);

pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;

// Bounds advertised by `Device::supported_output_configs`.
const MIN_CHANNELS: u16 = 1;
const MAX_CHANNELS: u16 = 32;
const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000);
const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000);
const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100);
const MIN_BUFFER_SIZE: u32 = 1;
const MAX_BUFFER_SIZE: u32 = u32::MAX;
// In frames; `usize` (unlike the bounds above) because it is used directly as a length.
const DEFAULT_BUFFER_SIZE: usize = 2048;
// WebAudio buffers always carry 32-bit float samples.
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
53 
impl Host {
    /// Creates the emscripten host, initializing the `stdweb` runtime context.
    ///
    /// Infallible in practice; the `Result` mirrors the other backends' API.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        stdweb::initialize();
        Ok(Host)
    }
}
60 
impl Devices {
    // Infallible; the `Result` only mirrors the `HostTrait::devices` signature.
    fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}
66 
67 impl Device {
68     #[inline]
name(&self) -> Result<String, DeviceNameError>69     fn name(&self) -> Result<String, DeviceNameError> {
70         Ok("Default Device".to_owned())
71     }
72 
73     #[inline]
supported_input_configs( &self, ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError>74     fn supported_input_configs(
75         &self,
76     ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
77         unimplemented!();
78     }
79 
80     #[inline]
supported_output_configs( &self, ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError>81     fn supported_output_configs(
82         &self,
83     ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
84         let buffer_size = SupportedBufferSize::Range {
85             min: MIN_BUFFER_SIZE,
86             max: MAX_BUFFER_SIZE,
87         };
88         let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS)
89             .map(|channels| SupportedStreamConfigRange {
90                 channels,
91                 min_sample_rate: MIN_SAMPLE_RATE,
92                 max_sample_rate: MAX_SAMPLE_RATE,
93                 buffer_size: buffer_size.clone(),
94                 sample_format: SUPPORTED_SAMPLE_FORMAT,
95             })
96             .collect();
97         Ok(configs.into_iter())
98     }
99 
default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>100     fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
101         unimplemented!();
102     }
103 
default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>104     fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
105         const EXPECT: &str = "expected at least one valid webaudio stream config";
106         let config = self
107             .supported_output_configs()
108             .expect(EXPECT)
109             .max_by(|a, b| a.cmp_default_heuristics(b))
110             .unwrap()
111             .with_sample_rate(DEFAULT_SAMPLE_RATE);
112 
113         Ok(config)
114     }
115 }
116 
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    fn is_available() -> bool {
        // Assume this host is always available on emscripten.
        true
    }

    // Enumerates at most one device; empty when WebAudio is unavailable.
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }

    // NOTE(review): delegates to a free function that is `unimplemented!()` —
    // calling this panics. Confirm input support is intentionally absent.
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }

    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
138 
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    // All of the following simply delegate to the inherent `Device` methods.
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }

    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }

    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }

    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }

    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }

    // Input capture is not implemented for the emscripten backend.
    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        unimplemented!()
    }

    // Builds an output stream backed by a freshly-created `AudioContext`. The
    // user's data callback is driven from a repeating `set_timeout` loop (see
    // `audio_callback_fn`) rather than a real audio-thread callback.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Reject channel counts / sample rates / formats outside the advertised bounds.
        if !valid_config(config, sample_format) {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }

        // A fixed buffer size of zero frames is meaningless; everything else is
        // accepted verbatim.
        let buffer_size_frames = match config.buffer_size {
            BufferSize::Fixed(v) => {
                if v == 0 {
                    return Err(BuildStreamError::StreamConfigNotSupported);
                } else {
                    v as usize
                }
            }
            BufferSize::Default => DEFAULT_BUFFER_SIZE,
        };

        // Create the stream.
        let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap();
        let stream = Stream { audio_ctxt_ref };

        // Specify the callback.
        // NOTE(review): `user_data` is a stack local of this function, but its
        // address is captured by the `set_timeout` closure, which fires *after*
        // this function returns — the pointer (and the `&Stream` inside the
        // tuple, which refers to the local `stream` moved out below) appears to
        // dangle. Likewise `config` is a borrowed parameter captured by the
        // deferred closure. Confirm the intended ownership/lifetime story here.
        let mut user_data = (self, data_callback, error_callback);
        let user_data_ptr = &mut user_data as *mut (_, _, _);

        // Use `set_timeout` to invoke a Rust callback repeatedly.
        //
        // The job of this callback is to fill the content of the audio buffers.
        //
        // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates
        // the loop.
        set_timeout(
            || {
                audio_callback_fn::<D, E>(
                    user_data_ptr as *mut c_void,
                    config,
                    sample_format,
                    buffer_size_frames,
                )
            },
            10,
        );

        Ok(stream)
    }
}
237 
238 impl StreamTrait for Stream {
play(&self) -> Result<(), PlayStreamError>239     fn play(&self) -> Result<(), PlayStreamError> {
240         let audio_ctxt = &self.audio_ctxt_ref;
241         js!(@{audio_ctxt}.resume());
242         Ok(())
243     }
244 
pause(&self) -> Result<(), PauseStreamError>245     fn pause(&self) -> Result<(), PauseStreamError> {
246         let audio_ctxt = &self.audio_ctxt_ref;
247         js!(@{audio_ctxt}.suspend());
248         Ok(())
249     }
250 }
251 
252 // The first argument of the callback function (a `void*`) is a cast pointer to `self`
253 // and to the `callback` parameter that was passed to `run`.
audio_callback_fn<D, E>( user_data_ptr: *mut c_void, config: &StreamConfig, sample_format: SampleFormat, buffer_size_frames: usize, ) where D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, E: FnMut(StreamError) + Send + 'static,254 fn audio_callback_fn<D, E>(
255     user_data_ptr: *mut c_void,
256     config: &StreamConfig,
257     sample_format: SampleFormat,
258     buffer_size_frames: usize,
259 ) where
260     D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
261     E: FnMut(StreamError) + Send + 'static,
262 {
263     let num_channels = config.channels as usize;
264     let sample_rate = config.sample_rate.0;
265     let buffer_size_samples = buffer_size_frames * num_channels;
266 
267     unsafe {
268         let user_data_ptr2 = user_data_ptr as *mut (&Stream, D, E);
269         let user_data = &mut *user_data_ptr2;
270         let (ref stream, ref mut data_cb, ref mut _err_cb) = user_data;
271         let audio_ctxt = &stream.audio_ctxt_ref;
272 
273         // TODO: We should be re-using a buffer.
274         let mut temporary_buffer = vec![0f32; buffer_size_samples];
275 
276         {
277             let len = temporary_buffer.len();
278             let data = temporary_buffer.as_mut_ptr() as *mut ();
279             let mut data = Data::from_parts(data, len, sample_format);
280 
281             let now_secs: f64 = js!(@{audio_ctxt}.getOutputTimestamp().currentTime)
282                 .try_into()
283                 .expect("failed to retrieve Value as f64");
284             let callback = crate::StreamInstant::from_secs_f64(now_secs);
285             // TODO: Use proper latency instead. Currently, unsupported on most browsers though, so
286             // we estimate based on buffer size instead. Probably should use this, but it's only
287             // supported by firefox (2020-04-28).
288             // let latency_secs: f64 = js!(@{audio_ctxt}.outputLatency).try_into().unwrap();
289             let buffer_duration = frames_to_duration(len, sample_rate as usize);
290             let playback = callback
291                 .add(buffer_duration)
292                 .expect("`playback` occurs beyond representation supported by `StreamInstant`");
293             let timestamp = crate::OutputStreamTimestamp { callback, playback };
294             let info = OutputCallbackInfo { timestamp };
295             data_cb(&mut data, &info);
296         }
297 
298         // TODO: directly use a TypedArray<f32> once this is supported by stdweb
299         let typed_array = {
300             let f32_slice = temporary_buffer.as_slice();
301             let u8_slice: &[u8] = from_raw_parts(
302                 f32_slice.as_ptr() as *const _,
303                 f32_slice.len() * mem::size_of::<f32>(),
304             );
305             let typed_array: TypedArray<u8> = u8_slice.into();
306             typed_array
307         };
308 
309         debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);
310 
311         js!(
312             var src_buffer = new Float32Array(@{typed_array}.buffer);
313             var context = @{audio_ctxt};
314             var buffer_size_frames = @{buffer_size_frames as u32};
315             var num_channels = @{num_channels as u32};
316             var sample_rate = sample_rate;
317 
318             var buffer = context.createBuffer(num_channels, buffer_size_frames, sample_rate);
319             for (var channel = 0; channel < num_channels; ++channel) {
320                 var buffer_content = buffer.getChannelData(channel);
321                 for (var i = 0; i < buffer_size_frames; ++i) {
322                     buffer_content[i] = src_buffer[i * num_channels + channel];
323                 }
324             }
325 
326             var node = context.createBufferSource();
327             node.buffer = buffer;
328             node.connect(context.destination);
329             node.start();
330         );
331 
332         // TODO: handle latency better ; right now we just use setInterval with the amount of sound
333         // data that is in each buffer ; this is obviously bad, and also the schedule is too tight
334         // and there may be underflows
335         set_timeout(
336             || audio_callback_fn::<D, E>(user_data_ptr, config, sample_format, buffer_size_frames),
337             buffer_size_frames as u32 * 1000 / sample_rate,
338         );
339     }
340 }
341 
impl Default for Devices {
    // The flag is `true` when exactly one (default) device remains to be yielded.
    fn default() -> Devices {
        // We produce an empty iterator if the WebAudio API isn't available.
        Devices(is_webaudio_available())
    }
}
348 impl Iterator for Devices {
349     type Item = Device;
350     #[inline]
next(&mut self) -> Option<Device>351     fn next(&mut self) -> Option<Device> {
352         if self.0 {
353             self.0 = false;
354             Some(Device)
355         } else {
356             None
357         }
358     }
359 }
360 
#[inline]
fn default_input_device() -> Option<Device> {
    // Input capture is not supported by the emscripten backend; callers reach
    // this via `HostTrait::default_input_device` and will panic.
    unimplemented!();
}
365 
366 #[inline]
default_output_device() -> Option<Device>367 fn default_output_device() -> Option<Device> {
368     if is_webaudio_available() {
369         Some(Device)
370     } else {
371         None
372     }
373 }
374 
375 // Detects whether the `AudioContext` global variable is available.
is_webaudio_available() -> bool376 fn is_webaudio_available() -> bool {
377     stdweb::initialize();
378     js!(if (!AudioContext) {
379         return false;
380     } else {
381         return true;
382     })
383     .try_into()
384     .unwrap()
385 }
386 
387 // Whether or not the given stream configuration is valid for building a stream.
valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool388 fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool {
389     conf.channels <= MAX_CHANNELS
390         && conf.channels >= MIN_CHANNELS
391         && conf.sample_rate <= MAX_SAMPLE_RATE
392         && conf.sample_rate >= MIN_SAMPLE_RATE
393         && sample_format == SUPPORTED_SAMPLE_FORMAT
394 }
395 
// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
fn frames_to_duration(frames: usize, rate: usize) -> std::time::Duration {
    // Split the floating-point second count into whole seconds and the
    // (truncated) nanosecond remainder.
    let total_secs = frames as f64 / rate as f64;
    let whole_secs = total_secs as u64;
    let frac_nanos = ((total_secs - whole_secs as f64) * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole_secs, frac_nanos)
}
403