1 //!
2 //! coreaudio on iOS looks a bit different from macOS. A lot of configuration needs to use
3 //! the AVAudioSession objc API which doesn't exist on macOS.
4 //!
5 //! TODO:
6 //! - Use AVAudioSession to enumerate buffer size / sample rate / number of channels and set
7 //!   buffer size.
8 //!
9 
10 extern crate core_foundation_sys;
11 extern crate coreaudio;
12 
13 use std::cell::RefCell;
14 
15 use self::coreaudio::audio_unit::render_callback::data;
16 use self::coreaudio::audio_unit::{render_callback, AudioUnit, Element, Scope};
17 use self::coreaudio::sys::{
18     kAudioOutputUnitProperty_EnableIO, kAudioUnitProperty_StreamFormat, AudioBuffer,
19     AudioStreamBasicDescription,
20 };
21 
22 use super::{asbd_from_config, frames_to_duration, host_time_to_stream_instant};
23 use traits::{DeviceTrait, HostTrait, StreamTrait};
24 
25 use crate::{
26     BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError,
27     DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
28     PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize,
29     SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError,
30 };
31 
32 use self::enumerate::{
33     default_input_device, default_output_device, Devices, SupportedInputConfigs,
34     SupportedOutputConfigs,
35 };
36 use std::slice;
37 
38 pub mod enumerate;
39 
// The default sample format on modern iOS is F32 (earlier versions used I16).
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
42 
/// The default audio device.
///
/// This backend exposes only a single device; `name()` always reports
/// "Default Device" and enumeration yields just this one entry.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;
45 
/// The iOS CoreAudio host, backed by the RemoteIO audio unit.
pub struct Host;
47 
impl Host {
    /// Create the iOS CoreAudio host. This never fails: the backend is
    /// always available on iOS (see `HostTrait::is_available`).
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
53 
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    /// CoreAudio is always present on iOS.
    fn is_available() -> bool {
        true
    }

    /// Enumerate devices; the `enumerate` module yields only the default device.
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }

    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }

    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
74 
75 impl Device {
76     #[inline]
name(&self) -> Result<String, DeviceNameError>77     fn name(&self) -> Result<String, DeviceNameError> {
78         Ok("Default Device".to_owned())
79     }
80 
81     #[inline]
supported_input_configs( &self, ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError>82     fn supported_input_configs(
83         &self,
84     ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
85         // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size
86         // probably need to actually be set to see if it works, but channels can be enumerated.
87 
88         let asbd: AudioStreamBasicDescription = default_input_asbd()?;
89         let stream_config = stream_config_from_asbd(asbd);
90         Ok(vec![SupportedStreamConfigRange {
91             channels: stream_config.channels,
92             min_sample_rate: stream_config.sample_rate,
93             max_sample_rate: stream_config.sample_rate,
94             buffer_size: stream_config.buffer_size.clone(),
95             sample_format: SUPPORTED_SAMPLE_FORMAT,
96         }]
97         .into_iter())
98     }
99 
100     #[inline]
supported_output_configs( &self, ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError>101     fn supported_output_configs(
102         &self,
103     ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
104         // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size
105         // probably need to actually be set to see if it works, but channels can be enumerated.
106 
107         let asbd: AudioStreamBasicDescription = default_output_asbd()?;
108         let stream_config = stream_config_from_asbd(asbd);
109 
110         let configs: Vec<_> = (1..=asbd.mChannelsPerFrame as u16)
111             .map(|channels| SupportedStreamConfigRange {
112                 channels,
113                 min_sample_rate: stream_config.sample_rate,
114                 max_sample_rate: stream_config.sample_rate,
115                 buffer_size: stream_config.buffer_size.clone(),
116                 sample_format: SUPPORTED_SAMPLE_FORMAT,
117             })
118             .collect();
119         Ok(configs.into_iter())
120     }
121 
122     #[inline]
default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>123     fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
124         let asbd: AudioStreamBasicDescription = default_input_asbd()?;
125         let stream_config = stream_config_from_asbd(asbd);
126         Ok(stream_config)
127     }
128 
129     #[inline]
default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>130     fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
131         let asbd: AudioStreamBasicDescription = default_output_asbd()?;
132         let stream_config = stream_config_from_asbd(asbd);
133         Ok(stream_config)
134     }
135 }
136 
137 impl DeviceTrait for Device {
138     type SupportedInputConfigs = SupportedInputConfigs;
139     type SupportedOutputConfigs = SupportedOutputConfigs;
140     type Stream = Stream;
141 
142     #[inline]
name(&self) -> Result<String, DeviceNameError>143     fn name(&self) -> Result<String, DeviceNameError> {
144         Device::name(self)
145     }
146 
147     #[inline]
supported_input_configs( &self, ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError>148     fn supported_input_configs(
149         &self,
150     ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
151         Device::supported_input_configs(self)
152     }
153 
154     #[inline]
supported_output_configs( &self, ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError>155     fn supported_output_configs(
156         &self,
157     ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
158         Device::supported_output_configs(self)
159     }
160 
161     #[inline]
default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>162     fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
163         Device::default_input_config(self)
164     }
165 
166     #[inline]
default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>167     fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
168         Device::default_output_config(self)
169     }
170 
build_input_stream_raw<D, E>( &self, config: &StreamConfig, sample_format: SampleFormat, mut data_callback: D, mut error_callback: E, ) -> Result<Self::Stream, BuildStreamError> where D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, E: FnMut(StreamError) + Send + 'static,171     fn build_input_stream_raw<D, E>(
172         &self,
173         config: &StreamConfig,
174         sample_format: SampleFormat,
175         mut data_callback: D,
176         mut error_callback: E,
177     ) -> Result<Self::Stream, BuildStreamError>
178     where
179         D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
180         E: FnMut(StreamError) + Send + 'static,
181     {
182         // The scope and element for working with a device's input stream.
183         let scope = Scope::Output;
184         let element = Element::Input;
185 
186         let mut audio_unit = create_audio_unit()?;
187         audio_unit.uninitialize()?;
188         configure_for_recording(&mut audio_unit)?;
189         audio_unit.initialize()?;
190 
191         // Set the stream in interleaved mode.
192         let asbd = asbd_from_config(config, sample_format);
193         audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
194 
195         // Set the buffersize
196         match config.buffer_size {
197             BufferSize::Fixed(_) => {
198                 return Err(BuildStreamError::StreamConfigNotSupported);
199             }
200             BufferSize::Default => (),
201         }
202 
203         // Register the callback that is being called by coreaudio whenever it needs data to be
204         // fed to the audio buffer.
205         let bytes_per_channel = sample_format.sample_size();
206         let sample_rate = config.sample_rate;
207         type Args = render_callback::Args<data::Raw>;
208         audio_unit.set_input_callback(move |args: Args| unsafe {
209             let ptr = (*args.data.data).mBuffers.as_ptr() as *const AudioBuffer;
210             let len = (*args.data.data).mNumberBuffers as usize;
211             let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len);
212 
213             // There is only 1 buffer when using interleaved channels
214             let AudioBuffer {
215                 mNumberChannels: channels,
216                 mDataByteSize: data_byte_size,
217                 mData: data,
218             } = buffers[0];
219 
220             let data = data as *mut ();
221             let len = (data_byte_size as usize / bytes_per_channel) as usize;
222             let data = Data::from_parts(data, len, sample_format);
223 
224             // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
225             let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
226                 Err(err) => {
227                     error_callback(err.into());
228                     return Err(());
229                 }
230                 Ok(cb) => cb,
231             };
232             let buffer_frames = len / channels as usize;
233             let delay = frames_to_duration(buffer_frames, sample_rate);
234             let capture = callback
235                 .sub(delay)
236                 .expect("`capture` occurs before origin of alsa `StreamInstant`");
237             let timestamp = crate::InputStreamTimestamp { callback, capture };
238 
239             let info = InputCallbackInfo { timestamp };
240             data_callback(&data, &info);
241             Ok(())
242         })?;
243 
244         audio_unit.start()?;
245 
246         Ok(Stream::new(StreamInner {
247             playing: true,
248             audio_unit,
249         }))
250     }
251 
252     /// Create an output stream.
build_output_stream_raw<D, E>( &self, config: &StreamConfig, sample_format: SampleFormat, mut data_callback: D, mut error_callback: E, ) -> Result<Self::Stream, BuildStreamError> where D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, E: FnMut(StreamError) + Send + 'static,253     fn build_output_stream_raw<D, E>(
254         &self,
255         config: &StreamConfig,
256         sample_format: SampleFormat,
257         mut data_callback: D,
258         mut error_callback: E,
259     ) -> Result<Self::Stream, BuildStreamError>
260     where
261         D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
262         E: FnMut(StreamError) + Send + 'static,
263     {
264         match config.buffer_size {
265             BufferSize::Fixed(_) => {
266                 return Err(BuildStreamError::StreamConfigNotSupported);
267             }
268             BufferSize::Default => (),
269         };
270 
271         let mut audio_unit = create_audio_unit()?;
272 
273         // The scope and element for working with a device's output stream.
274         let scope = Scope::Input;
275         let element = Element::Output;
276 
277         // Set the stream in interleaved mode.
278         let asbd = asbd_from_config(config, sample_format);
279         audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
280 
281         // Register the callback that is being called by coreaudio whenever it needs data to be
282         // fed to the audio buffer.
283         let bytes_per_channel = sample_format.sample_size();
284         let sample_rate = config.sample_rate;
285         type Args = render_callback::Args<data::Raw>;
286         audio_unit.set_render_callback(move |args: Args| unsafe {
287             // If `run()` is currently running, then a callback will be available from this list.
288             // Otherwise, we just fill the buffer with zeroes and return.
289 
290             let AudioBuffer {
291                 mNumberChannels: channels,
292                 mDataByteSize: data_byte_size,
293                 mData: data,
294             } = (*args.data.data).mBuffers[0];
295 
296             let data = data as *mut ();
297             let len = (data_byte_size as usize / bytes_per_channel) as usize;
298             let mut data = Data::from_parts(data, len, sample_format);
299 
300             let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
301                 Err(err) => {
302                     error_callback(err.into());
303                     return Err(());
304                 }
305                 Ok(cb) => cb,
306             };
307             // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
308             let buffer_frames = len / channels as usize;
309             let delay = frames_to_duration(buffer_frames, sample_rate);
310             let playback = callback
311                 .add(delay)
312                 .expect("`playback` occurs beyond representation supported by `StreamInstant`");
313             let timestamp = crate::OutputStreamTimestamp { callback, playback };
314 
315             let info = OutputCallbackInfo { timestamp };
316             data_callback(&mut data, &info);
317             Ok(())
318         })?;
319 
320         audio_unit.start()?;
321 
322         Ok(Stream::new(StreamInner {
323             playing: true,
324             audio_unit,
325         }))
326     }
327 }
328 
/// A running input or output stream.
///
/// The inner state is wrapped in a `RefCell` so that `play`/`pause`
/// (which take `&self` via `StreamTrait`) can mutate it.
pub struct Stream {
    inner: RefCell<StreamInner>,
}
332 
333 impl Stream {
new(inner: StreamInner) -> Self334     fn new(inner: StreamInner) -> Self {
335         Self {
336             inner: RefCell::new(inner),
337         }
338     }
339 }
340 
341 impl StreamTrait for Stream {
play(&self) -> Result<(), PlayStreamError>342     fn play(&self) -> Result<(), PlayStreamError> {
343         let mut stream = self.inner.borrow_mut();
344 
345         if !stream.playing {
346             if let Err(e) = stream.audio_unit.start() {
347                 let description = format!("{}", e);
348                 let err = BackendSpecificError { description };
349                 return Err(err.into());
350             }
351             stream.playing = true;
352         }
353         Ok(())
354     }
355 
pause(&self) -> Result<(), PauseStreamError>356     fn pause(&self) -> Result<(), PauseStreamError> {
357         let mut stream = self.inner.borrow_mut();
358 
359         if stream.playing {
360             if let Err(e) = stream.audio_unit.stop() {
361                 let description = format!("{}", e);
362                 let err = BackendSpecificError { description };
363                 return Err(err.into());
364             }
365 
366             stream.playing = false;
367         }
368         Ok(())
369     }
370 }
371 
/// State shared behind `Stream`'s `RefCell`.
struct StreamInner {
    // Tracks whether `audio_unit` has been started, so `play`/`pause`
    // only call start/stop on an actual state change.
    playing: bool,
    audio_unit: AudioUnit,
}
376 
create_audio_unit() -> Result<AudioUnit, coreaudio::Error>377 fn create_audio_unit() -> Result<AudioUnit, coreaudio::Error> {
378     AudioUnit::new(coreaudio::audio_unit::IOType::RemoteIO)
379 }
380 
configure_for_recording(audio_unit: &mut AudioUnit) -> Result<(), coreaudio::Error>381 fn configure_for_recording(audio_unit: &mut AudioUnit) -> Result<(), coreaudio::Error> {
382     // Enable mic recording
383     let enable_input = 1u32;
384     audio_unit.set_property(
385         kAudioOutputUnitProperty_EnableIO,
386         Scope::Input,
387         Element::Input,
388         Some(&enable_input),
389     )?;
390 
391     // Disable output
392     let disable_output = 0u32;
393     audio_unit.set_property(
394         kAudioOutputUnitProperty_EnableIO,
395         Scope::Output,
396         Element::Output,
397         Some(&disable_output),
398     )?;
399 
400     Ok(())
401 }
402 
default_output_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error>403 fn default_output_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error> {
404     let audio_unit = create_audio_unit()?;
405     let id = kAudioUnitProperty_StreamFormat;
406     let asbd: AudioStreamBasicDescription =
407         audio_unit.get_property(id, Scope::Output, Element::Output)?;
408     Ok(asbd)
409 }
410 
default_input_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error>411 fn default_input_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error> {
412     let mut audio_unit = create_audio_unit()?;
413     audio_unit.uninitialize()?;
414     configure_for_recording(&mut audio_unit)?;
415     audio_unit.initialize()?;
416 
417     let id = kAudioUnitProperty_StreamFormat;
418     let asbd: AudioStreamBasicDescription =
419         audio_unit.get_property(id, Scope::Input, Element::Input)?;
420     Ok(asbd)
421 }
422 
stream_config_from_asbd(asbd: AudioStreamBasicDescription) -> SupportedStreamConfig423 fn stream_config_from_asbd(asbd: AudioStreamBasicDescription) -> SupportedStreamConfig {
424     let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 };
425     SupportedStreamConfig {
426         channels: asbd.mChannelsPerFrame as u16,
427         sample_rate: SampleRate(asbd.mSampleRate as u32),
428         buffer_size: buffer_size.clone(),
429         sample_format: SUPPORTED_SAMPLE_FORMAT,
430     }
431 }
432