1 #ifdef RTAUDIO_ENABLED // -GODOT-
2
3 /************************************************************************/
4 /*! \class RtAudio
5 \brief Realtime audio i/o C++ classes.
6
7 RtAudio provides a common API (Application Programming Interface)
8 for realtime audio input/output across Linux (native ALSA, Jack,
9 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
10 (DirectSound, ASIO and WASAPI) operating systems.
11
12 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13
14 RtAudio: realtime audio i/o C++ classes
15 Copyright (c) 2001-2016 Gary P. Scavone
16
17 Permission is hereby granted, free of charge, to any person
18 obtaining a copy of this software and associated documentation files
19 (the "Software"), to deal in the Software without restriction,
20 including without limitation the rights to use, copy, modify, merge,
21 publish, distribute, sublicense, and/or sell copies of the Software,
22 and to permit persons to whom the Software is furnished to do so,
23 subject to the following conditions:
24
25 The above copyright notice and this permission notice shall be
26 included in all copies or substantial portions of the Software.
27
28 Any person wishing to distribute modifications to the Software is
29 asked to send the modifications to the original developer so that
30 they can be incorporated into the canonical version. This is,
31 however, not a binding provision of this license.
32
33 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
37 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
38 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 */
41 /************************************************************************/
42
43 // RtAudio: Version 4.1.2
44
45 #include "RtAudio.h"
46 #include <iostream>
47 #include <cstdlib>
48 #include <cstring>
49 #include <climits>
50 #include <algorithm>
51
// Static variable definitions.
// Table of candidate sample rates probed when a device only reports a
// min/max range instead of discrete rates (see the API-specific
// getDeviceInfo() implementations).  MAX_SAMPLE_RATES must match the
// number of entries in SAMPLE_RATES.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
58
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
64
65 #include "tchar.h"
66
// Narrow-string overload: the input is already 8-bit text, so simply
// wrap it in a std::string (no character-set conversion required).
static std::string convertCharPointerToStdString(const char *text)
{
  std::string result( text );
  return result;
}
71
convertCharPointerToStdString(const wchar_t * text)72 static std::string convertCharPointerToStdString(const wchar_t *text)
73 {
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
77 return s;
78 }
79
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 // pthread API
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
86 #else
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
89 #endif
90
91 // *************************************************** //
92 //
93 // RtAudio definitions.
94 //
95 // *************************************************** //
96
getVersion(void)97 std::string RtAudio :: getVersion( void ) throw()
98 {
99 return RTAUDIO_VERSION;
100 }
101
// Fill 'apis' with the set of audio APIs compiled into this build.
// Any previous contents of 'apis' are discarded.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.  Do not reorder these blocks casually: earlier
  // entries are preferred when no API is explicitly requested.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
136
// Destroy any existing API instance and attempt to construct the
// requested one.  On exit rtapi_ is either a freshly constructed
// backend or 0 if 'api' has no compiled support in this build.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0; // reset so callers can detect construction failure

  // Each candidate is guarded both by its compile-time support macro
  // and the runtime 'api' value; at most one branch fires.
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
180
RtAudio(RtAudio::Api api)181 RtAudio :: RtAudio( RtAudio::Api api )
182 {
183 rtapi_ = 0;
184
185 if ( api != UNSPECIFIED ) {
186 // Attempt to open the specified API.
187 openRtApi( api );
188 if ( rtapi_ ) return;
189
190 // No compiled support for specified API value. Issue a debug
191 // warning and continue as if no API was specified.
192 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
193 }
194
195 // Iterate through the compiled APIs and return as soon as we find
196 // one with at least one device or we reach the end of the list.
197 std::vector< RtAudio::Api > apis;
198 getCompiledApi( apis );
199 for ( unsigned int i=0; i<apis.size(); i++ ) {
200 openRtApi( apis[i] );
201 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
202 }
203
204 if ( rtapi_ ) return;
205
206 // It should not be possible to get here because the preprocessor
207 // definition __RTAUDIO_DUMMY__ is automatically defined if no
208 // API-specific definitions are passed to the compiler. But just in
209 // case something weird happens, we'll thow an error.
210 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
211 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
212 }
213
~RtAudio()214 RtAudio :: ~RtAudio() throw()
215 {
216 if ( rtapi_ )
217 delete rtapi_;
218 }
219
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioErrorCallback errorCallback)220 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
221 RtAudio::StreamParameters *inputParameters,
222 RtAudioFormat format, unsigned int sampleRate,
223 unsigned int *bufferFrames,
224 RtAudioCallback callback, void *userData,
225 RtAudio::StreamOptions *options,
226 RtAudioErrorCallback errorCallback )
227 {
228 return rtapi_->openStream( outputParameters, inputParameters, format,
229 sampleRate, bufferFrames, callback,
230 userData, options, errorCallback );
231 }
232
233 // *************************************************** //
234 //
235 // Public RtApi definitions (see end of file for
236 // private or protected utility functions).
237 //
238 // *************************************************** //
239
// Base-class constructor: put the shared stream bookkeeping into a
// known "closed" state and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;   // [0] = output buffer, [1] = input buffer
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex ); // platform-specific (see macros above)
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
251
// Base-class destructor: release the stream mutex created in RtApi().
// Subclass destructors are responsible for closing any open stream.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
256
// Validate the caller-supplied stream parameters, then ask the
// API-specific probeDeviceOpen() to actually open the device(s).
// On any validation failure, error() is invoked and the function
// returns with the stream still closed.  On success the stream is
// left in the STREAM_STOPPED state.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for any unrecognized RtAudioFormat value.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Device indices must be in [0, getDeviceCount()).
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.  probeDeviceOpen()
  // is implemented by each API subclass and sets errorText_ on failure.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // If the output half was already opened, tear it down so we do
      // not leak a half-open duplex stream.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual number of buffers chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
349
getDefaultInputDevice(void)350 unsigned int RtApi :: getDefaultInputDevice( void )
351 {
352 // Should be implemented in subclasses if possible.
353 return 0;
354 }
355
getDefaultOutputDevice(void)356 unsigned int RtApi :: getDefaultOutputDevice( void )
357 {
358 // Should be implemented in subclasses if possible.
359 return 0;
360 }
361
closeStream(void)362 void RtApi :: closeStream( void )
363 {
364 // MUST be implemented in subclasses!
365 return;
366 }
367
// Attempt to open the given device for the given mode/format.  This
// base-class version always fails — it MUST be implemented in
// subclasses!  Parameter names are commented out because they are
// intentionally unused here.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
376
tickStreamTime(void)377 void RtApi :: tickStreamTime( void )
378 {
379 // Subclasses that do not provide their own implementation of
380 // getStreamTime should call this function once per buffer I/O to
381 // provide basic stream time support.
382
383 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384
385 #if defined( HAVE_GETTIMEOFDAY )
386 gettimeofday( &stream_.lastTickTimestamp, NULL );
387 #endif
388 }
389
getStreamLatency(void)390 long RtApi :: getStreamLatency( void )
391 {
392 verifyStream();
393
394 long totalLatency = 0;
395 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
396 totalLatency = stream_.latency[0];
397 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
398 totalLatency += stream_.latency[1];
399
400 return totalLatency;
401 }
402
// Return the number of elapsed seconds since the stream was started.
// When gettimeofday() is available, the coarse per-buffer streamTime
// is refined with the wall-clock time elapsed since the last tick.
double RtApi :: getStreamTime( void )
{
  verifyStream(); // throws if no stream is open

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // No interpolation for a stopped stream or before the first tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
425
setStreamTime(double time)426 void RtApi :: setStreamTime( double time )
427 {
428 verifyStream();
429
430 if ( time >= 0.0 )
431 stream_.streamTime = time;
432 }
433
getStreamSampleRate(void)434 unsigned int RtApi :: getStreamSampleRate( void )
435 {
436 verifyStream();
437
438 return stream_.sampleRate;
439 }
440
441
442 // *************************************************** //
443 //
444 // OS/API-specific methods.
445 //
446 // *************************************************** //
447
448 #if defined(__MACOSX_CORE__)
449
450 // The OS X CoreAudio API is designed to use a separate callback
451 // procedure for each of its audio devices. A single RtAudio duplex
452 // stream using two different devices is supported here, though it
453 // cannot be guaranteed to always behave correctly because we cannot
454 // synchronize these two callbacks.
455 //
456 // A property listener is installed for over/underrun information.
457 // However, no functionality is currently provided to allow property
458 // listeners to trigger user handlers because it is unclear what could
459 // be done if a critical stream parameter (buffer size, sample rate,
460 // device disconnect) notification arrived. The listeners entail
461 // quite a bit of extra code and most likely, a user program wouldn't
462 // be prepared for the result anyway. However, we do provide a flag
463 // to the client callback function to inform of an over/underrun.
464
465 // A structure to hold various information related to the CoreAudio API
466 // implementation.
// A structure to hold various information related to the CoreAudio API
// implementation.  Two-element arrays are indexed [0] = output/playback
// and [1] = input/capture, matching RtAudio's StreamMode convention.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2]; // IOProc ids (10.5+ registration API)
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags set by the property listener
  char *deviceBuffer;     // intermediate buffer for format/channel conversion
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
483
// CoreAudio backend constructor.  Registers a NULL run loop with the
// HAL so that property queries/updates work without a dedicated
// CFRunLoop (required on OS X 10.6+).
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING ); // non-fatal: continue anyway
  }
#endif
}
502
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
510
// Return the number of CoreAudio devices, or 0 on error (a WARNING is
// reported through the RtAudio error mechanism, not thrown).
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.  Rather than
  // fetching the device list, only its byte size is queried; the count
  // is the size divided by sizeof(AudioDeviceID).
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
525
// Return the RtAudio index (position within the HAL device list) of
// the system default input device, or 0 on any failure.
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device the answer is trivially index 0.
  if ( nDevices <= 1 ) return 0;

  // Fetch the AudioDeviceID of the default input device.
  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Now fetch the full device list and translate the id into an index.
  // NOTE(review): dataSize is scaled from its post-call value here
  // (sizeof(AudioDeviceID) after the call above) — the sibling
  // getDefaultOutputDevice() computes it explicitly instead; the
  // result is the same but the explicit form is clearer.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
558
// Return the RtAudio index (position within the HAL device list) of
// the system default output device, or 0 on any failure.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device the answer is trivially index 0.
  if ( nDevices <= 1 ) return 0;

  // Fetch the AudioDeviceID of the default output device.
  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Now fetch the full device list and translate the id into an index.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
591
// Probe a CoreAudio device and fill in an RtAudio::DeviceInfo record:
// name, channel counts, supported sample rates, native format and
// default-device flags.  On any failure a WARNING is reported and the
// partially-filled info is returned with info.probed == false.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false; // only set true after every probe step succeeds

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Translate the RtAudio device index into an AudioDeviceID by
  // fetching the HAL's device list.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.  The reported name is "<manufacturer>: <name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Copy the CFString into a temporary C buffer.  3 bytes per UTF-16
  // code unit plus a terminator is enough for any UTF-8 expansion here.
  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Same CFString-to-C-string dance for the device name itself.
  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum the channels of all streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information: sum the channels of all streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.  Note: mScope is still
  // kAudioDevicePropertyScopeInput from the block above unless the
  // device has output channels.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Track the preferred rate: the highest supported rate <= 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      // Intersect all reported ranges (most conservative bounds).
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  if ( haveValueRange ) {
    // Expand the continuous range into the standard candidate rates.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
832
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)833 static OSStatus callbackHandler( AudioDeviceID inDevice,
834 const AudioTimeStamp* /*inNow*/,
835 const AudioBufferList* inInputData,
836 const AudioTimeStamp* /*inInputTime*/,
837 AudioBufferList* outOutputData,
838 const AudioTimeStamp* /*inOutputTime*/,
839 void* infoPointer )
840 {
841 CallbackInfo *info = (CallbackInfo *) infoPointer;
842
843 RtApiCore *object = (RtApiCore *) info->object;
844 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
845 return kAudioHardwareUnspecifiedError;
846 else
847 return kAudioHardwareNoError;
848 }
849
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)850 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
851 UInt32 nAddresses,
852 const AudioObjectPropertyAddress properties[],
853 void* handlePointer )
854 {
855 CoreHandle *handle = (CoreHandle *) handlePointer;
856 for ( UInt32 i=0; i<nAddresses; i++ ) {
857 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
858 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
859 handle->xrun[1] = true;
860 else
861 handle->xrun[0] = true;
862 }
863 }
864
865 return kAudioHardwareNoError;
866 }
867
// Property listener used while waiting for a nominal sample-rate change to
// take effect: each time the device's nominal rate property changes, read the
// current value into the Float64 pointed to by ratePointer so the caller can
// poll it (see the rate-change wait loop in probeDeviceOpen).
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
                              void* ratePointer )
{
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  // Result intentionally ignored: on failure *rate is simply left unchanged
  // and the caller's wait loop times out.
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
}
881
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)882 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
883 unsigned int firstChannel, unsigned int sampleRate,
884 RtAudioFormat format, unsigned int *bufferSize,
885 RtAudio::StreamOptions *options )
886 {
887 // Get device ID
888 unsigned int nDevices = getDeviceCount();
889 if ( nDevices == 0 ) {
890 // This should not happen because a check is made before this function is called.
891 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
892 return FAILURE;
893 }
894
895 if ( device >= nDevices ) {
896 // This should not happen because a check is made before this function is called.
897 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
898 return FAILURE;
899 }
900
901 AudioDeviceID deviceList[ nDevices ];
902 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
903 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
904 kAudioObjectPropertyScopeGlobal,
905 kAudioObjectPropertyElementMaster };
906 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
907 0, NULL, &dataSize, (void *) &deviceList );
908 if ( result != noErr ) {
909 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
910 return FAILURE;
911 }
912
913 AudioDeviceID id = deviceList[ device ];
914
915 // Setup for stream mode.
916 bool isInput = false;
917 if ( mode == INPUT ) {
918 isInput = true;
919 property.mScope = kAudioDevicePropertyScopeInput;
920 }
921 else
922 property.mScope = kAudioDevicePropertyScopeOutput;
923
924 // Get the stream "configuration".
925 AudioBufferList *bufferList = nil;
926 dataSize = 0;
927 property.mSelector = kAudioDevicePropertyStreamConfiguration;
928 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
929 if ( result != noErr || dataSize == 0 ) {
930 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
931 errorText_ = errorStream_.str();
932 return FAILURE;
933 }
934
935 // Allocate the AudioBufferList.
936 bufferList = (AudioBufferList *) malloc( dataSize );
937 if ( bufferList == NULL ) {
938 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
939 return FAILURE;
940 }
941
942 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
943 if (result != noErr || dataSize == 0) {
944 free( bufferList );
945 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
946 errorText_ = errorStream_.str();
947 return FAILURE;
948 }
949
950 // Search for one or more streams that contain the desired number of
951 // channels. CoreAudio devices can have an arbitrary number of
952 // streams and each stream can have an arbitrary number of channels.
953 // For each stream, a single buffer of interleaved samples is
954 // provided. RtAudio prefers the use of one stream of interleaved
955 // data or multiple consecutive single-channel streams. However, we
956 // now support multiple consecutive multi-channel streams of
957 // interleaved data as well.
958 UInt32 iStream, offsetCounter = firstChannel;
959 UInt32 nStreams = bufferList->mNumberBuffers;
960 bool monoMode = false;
961 bool foundStream = false;
962
963 // First check that the device supports the requested number of
964 // channels.
965 UInt32 deviceChannels = 0;
966 for ( iStream=0; iStream<nStreams; iStream++ )
967 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
968
969 if ( deviceChannels < ( channels + firstChannel ) ) {
970 free( bufferList );
971 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
972 errorText_ = errorStream_.str();
973 return FAILURE;
974 }
975
976 // Look for a single stream meeting our needs.
977 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
978 for ( iStream=0; iStream<nStreams; iStream++ ) {
979 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
980 if ( streamChannels >= channels + offsetCounter ) {
981 firstStream = iStream;
982 channelOffset = offsetCounter;
983 foundStream = true;
984 break;
985 }
986 if ( streamChannels > offsetCounter ) break;
987 offsetCounter -= streamChannels;
988 }
989
990 // If we didn't find a single stream above, then we should be able
991 // to meet the channel specification with multiple streams.
992 if ( foundStream == false ) {
993 monoMode = true;
994 offsetCounter = firstChannel;
995 for ( iStream=0; iStream<nStreams; iStream++ ) {
996 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
997 if ( streamChannels > offsetCounter ) break;
998 offsetCounter -= streamChannels;
999 }
1000
1001 firstStream = iStream;
1002 channelOffset = offsetCounter;
1003 Int32 channelCounter = channels + offsetCounter - streamChannels;
1004
1005 if ( streamChannels > 1 ) monoMode = false;
1006 while ( channelCounter > 0 ) {
1007 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1008 if ( streamChannels > 1 ) monoMode = false;
1009 channelCounter -= streamChannels;
1010 streamCount++;
1011 }
1012 }
1013
1014 free( bufferList );
1015
1016 // Determine the buffer size.
1017 AudioValueRange bufferRange;
1018 dataSize = sizeof( AudioValueRange );
1019 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1020 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1021
1022 if ( result != noErr ) {
1023 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1024 errorText_ = errorStream_.str();
1025 return FAILURE;
1026 }
1027
1028 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1029 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1030 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031
1032 // Set the buffer size. For multiple streams, I'm assuming we only
1033 // need to make this setting for the master channel.
1034 UInt32 theSize = (UInt32) *bufferSize;
1035 dataSize = sizeof( UInt32 );
1036 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1037 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1038
1039 if ( result != noErr ) {
1040 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1041 errorText_ = errorStream_.str();
1042 return FAILURE;
1043 }
1044
1045 // If attempting to setup a duplex stream, the bufferSize parameter
1046 // MUST be the same in both directions!
1047 *bufferSize = theSize;
1048 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1049 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1050 errorText_ = errorStream_.str();
1051 return FAILURE;
1052 }
1053
1054 stream_.bufferSize = *bufferSize;
1055 stream_.nBuffers = 1;
1056
1057 // Try to set "hog" mode ... it's not clear to me this is working.
1058 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1059 pid_t hog_pid;
1060 dataSize = sizeof( hog_pid );
1061 property.mSelector = kAudioDevicePropertyHogMode;
1062 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1063 if ( result != noErr ) {
1064 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1065 errorText_ = errorStream_.str();
1066 return FAILURE;
1067 }
1068
1069 if ( hog_pid != getpid() ) {
1070 hog_pid = getpid();
1071 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1072 if ( result != noErr ) {
1073 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1074 errorText_ = errorStream_.str();
1075 return FAILURE;
1076 }
1077 }
1078 }
1079
1080 // Check and if necessary, change the sample rate for the device.
1081 Float64 nominalRate;
1082 dataSize = sizeof( Float64 );
1083 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1084 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1085 if ( result != noErr ) {
1086 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1087 errorText_ = errorStream_.str();
1088 return FAILURE;
1089 }
1090
1091 // Only change the sample rate if off by more than 1 Hz.
1092 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1093
1094 // Set a property listener for the sample rate change
1095 Float64 reportedRate = 0.0;
1096 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1097 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1101 return FAILURE;
1102 }
1103
1104 nominalRate = (Float64) sampleRate;
1105 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1106 if ( result != noErr ) {
1107 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1109 errorText_ = errorStream_.str();
1110 return FAILURE;
1111 }
1112
1113 // Now wait until the reported nominal rate is what we just set.
1114 UInt32 microCounter = 0;
1115 while ( reportedRate != nominalRate ) {
1116 microCounter += 5000;
1117 if ( microCounter > 5000000 ) break;
1118 usleep( 5000 );
1119 }
1120
1121 // Remove the property listener.
1122 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1123
1124 if ( microCounter > 5000000 ) {
1125 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1126 errorText_ = errorStream_.str();
1127 return FAILURE;
1128 }
1129 }
1130
1131 // Now set the stream format for all streams. Also, check the
1132 // physical format of the device and change that if necessary.
1133 AudioStreamBasicDescription description;
1134 dataSize = sizeof( AudioStreamBasicDescription );
1135 property.mSelector = kAudioStreamPropertyVirtualFormat;
1136 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1137 if ( result != noErr ) {
1138 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1139 errorText_ = errorStream_.str();
1140 return FAILURE;
1141 }
1142
1143 // Set the sample rate and data format id. However, only make the
1144 // change if the sample rate is not within 1.0 of the desired
1145 // rate and the format is not linear pcm.
1146 bool updateFormat = false;
1147 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1148 description.mSampleRate = (Float64) sampleRate;
1149 updateFormat = true;
1150 }
1151
1152 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1153 description.mFormatID = kAudioFormatLinearPCM;
1154 updateFormat = true;
1155 }
1156
1157 if ( updateFormat ) {
1158 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1159 if ( result != noErr ) {
1160 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1161 errorText_ = errorStream_.str();
1162 return FAILURE;
1163 }
1164 }
1165
1166 // Now check the physical format.
1167 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1168 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1169 if ( result != noErr ) {
1170 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1171 errorText_ = errorStream_.str();
1172 return FAILURE;
1173 }
1174
1175 //std::cout << "Current physical stream format:" << std::endl;
1176 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1177 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1178 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1179 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1180
1181 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1182 description.mFormatID = kAudioFormatLinearPCM;
1183 //description.mSampleRate = (Float64) sampleRate;
1184 AudioStreamBasicDescription testDescription = description;
1185 UInt32 formatFlags;
1186
1187 // We'll try higher bit rates first and then work our way down.
1188 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1194 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1196 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1198 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1200 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1201
1202 bool setPhysicalFormat = false;
1203 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1204 testDescription = description;
1205 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1206 testDescription.mFormatFlags = physicalFormats[i].second;
1207 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1208 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1209 else
1210 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1211 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1212 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1213 if ( result == noErr ) {
1214 setPhysicalFormat = true;
1215 //std::cout << "Updated physical stream format:" << std::endl;
1216 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1217 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1218 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1219 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1220 break;
1221 }
1222 }
1223
1224 if ( !setPhysicalFormat ) {
1225 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1226 errorText_ = errorStream_.str();
1227 return FAILURE;
1228 }
1229 } // done setting virtual/physical formats.
1230
1231 // Get the stream / device latency.
1232 UInt32 latency;
1233 dataSize = sizeof( UInt32 );
1234 property.mSelector = kAudioDevicePropertyLatency;
1235 if ( AudioObjectHasProperty( id, &property ) == true ) {
1236 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1237 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1238 else {
1239 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1240 errorText_ = errorStream_.str();
1241 error( RtAudioError::WARNING );
1242 }
1243 }
1244
1245 // Byte-swapping: According to AudioHardware.h, the stream data will
1246 // always be presented in native-endian format, so we should never
1247 // need to byte swap.
1248 stream_.doByteSwap[mode] = false;
1249
1250 // From the CoreAudio documentation, PCM data must be supplied as
1251 // 32-bit floats.
1252 stream_.userFormat = format;
1253 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1254
1255 if ( streamCount == 1 )
1256 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1257 else // multiple streams
1258 stream_.nDeviceChannels[mode] = channels;
1259 stream_.nUserChannels[mode] = channels;
1260 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1261 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1262 else stream_.userInterleaved = true;
1263 stream_.deviceInterleaved[mode] = true;
1264 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1265
1266 // Set flags for buffer conversion.
1267 stream_.doConvertBuffer[mode] = false;
1268 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1269 stream_.doConvertBuffer[mode] = true;
1270 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( streamCount == 1 ) {
1273 if ( stream_.nUserChannels[mode] > 1 &&
1274 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1275 stream_.doConvertBuffer[mode] = true;
1276 }
1277 else if ( monoMode && stream_.userInterleaved )
1278 stream_.doConvertBuffer[mode] = true;
1279
1280 // Allocate our CoreHandle structure for the stream.
1281 CoreHandle *handle = 0;
1282 if ( stream_.apiHandle == 0 ) {
1283 try {
1284 handle = new CoreHandle;
1285 }
1286 catch ( std::bad_alloc& ) {
1287 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1288 goto error;
1289 }
1290
1291 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1292 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1293 goto error;
1294 }
1295 stream_.apiHandle = (void *) handle;
1296 }
1297 else
1298 handle = (CoreHandle *) stream_.apiHandle;
1299 handle->iStream[mode] = firstStream;
1300 handle->nStreams[mode] = streamCount;
1301 handle->id[mode] = id;
1302
1303 // Allocate necessary internal buffers.
1304 unsigned long bufferBytes;
1305 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1306 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1307 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1308 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1309 if ( stream_.userBuffer[mode] == NULL ) {
1310 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1311 goto error;
1312 }
1313
1314 // If possible, we will make use of the CoreAudio stream buffers as
1315 // "device buffers". However, we can't do this if using multiple
1316 // streams.
1317 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1318
1319 bool makeBuffer = true;
1320 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1321 if ( mode == INPUT ) {
1322 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1323 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1324 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1325 }
1326 }
1327
1328 if ( makeBuffer ) {
1329 bufferBytes *= *bufferSize;
1330 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1331 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1332 if ( stream_.deviceBuffer == NULL ) {
1333 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1334 goto error;
1335 }
1336 }
1337 }
1338
1339 stream_.sampleRate = sampleRate;
1340 stream_.device[mode] = device;
1341 stream_.state = STREAM_STOPPED;
1342 stream_.callbackInfo.object = (void *) this;
1343
1344 // Setup the buffer conversion information structure.
1345 if ( stream_.doConvertBuffer[mode] ) {
1346 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1347 else setConvertInfo( mode, channelOffset );
1348 }
1349
1350 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1351 // Only one callback procedure per device.
1352 stream_.mode = DUPLEX;
1353 else {
1354 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1355 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1356 #else
1357 // deprecated in favor of AudioDeviceCreateIOProcID()
1358 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1359 #endif
1360 if ( result != noErr ) {
1361 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1362 errorText_ = errorStream_.str();
1363 goto error;
1364 }
1365 if ( stream_.mode == OUTPUT && mode == INPUT )
1366 stream_.mode = DUPLEX;
1367 else
1368 stream_.mode = mode;
1369 }
1370
1371 // Setup the device property listener for over/underload.
1372 property.mSelector = kAudioDeviceProcessorOverload;
1373 property.mScope = kAudioObjectPropertyScopeGlobal;
1374 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1375
1376 return SUCCESS;
1377
1378 error:
1379 if ( handle ) {
1380 pthread_cond_destroy( &handle->condition );
1381 delete handle;
1382 stream_.apiHandle = 0;
1383 }
1384
1385 for ( int i=0; i<2; i++ ) {
1386 if ( stream_.userBuffer[i] ) {
1387 free( stream_.userBuffer[i] );
1388 stream_.userBuffer[i] = 0;
1389 }
1390 }
1391
1392 if ( stream_.deviceBuffer ) {
1393 free( stream_.deviceBuffer );
1394 stream_.deviceBuffer = 0;
1395 }
1396
1397 stream_.state = STREAM_CLOSED;
1398 return FAILURE;
1399 }
1400
closeStream(void)1401 void RtApiCore :: closeStream( void )
1402 {
1403 if ( stream_.state == STREAM_CLOSED ) {
1404 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1405 error( RtAudioError::WARNING );
1406 return;
1407 }
1408
1409 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1411 if (handle) {
1412 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1413 kAudioObjectPropertyScopeGlobal,
1414 kAudioObjectPropertyElementMaster };
1415
1416 property.mSelector = kAudioDeviceProcessorOverload;
1417 property.mScope = kAudioObjectPropertyScopeGlobal;
1418 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1419 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1420 error( RtAudioError::WARNING );
1421 }
1422 }
1423 if ( stream_.state == STREAM_RUNNING )
1424 AudioDeviceStop( handle->id[0], callbackHandler );
1425 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1426 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1427 #else
1428 // deprecated in favor of AudioDeviceDestroyIOProcID()
1429 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1430 #endif
1431 }
1432
1433 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1434 if (handle) {
1435 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1436 kAudioObjectPropertyScopeGlobal,
1437 kAudioObjectPropertyElementMaster };
1438
1439 property.mSelector = kAudioDeviceProcessorOverload;
1440 property.mScope = kAudioObjectPropertyScopeGlobal;
1441 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1442 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1443 error( RtAudioError::WARNING );
1444 }
1445 }
1446 if ( stream_.state == STREAM_RUNNING )
1447 AudioDeviceStop( handle->id[1], callbackHandler );
1448 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1449 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1450 #else
1451 // deprecated in favor of AudioDeviceDestroyIOProcID()
1452 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1453 #endif
1454 }
1455
1456 for ( int i=0; i<2; i++ ) {
1457 if ( stream_.userBuffer[i] ) {
1458 free( stream_.userBuffer[i] );
1459 stream_.userBuffer[i] = 0;
1460 }
1461 }
1462
1463 if ( stream_.deviceBuffer ) {
1464 free( stream_.deviceBuffer );
1465 stream_.deviceBuffer = 0;
1466 }
1467
1468 // Destroy pthread condition variable.
1469 pthread_cond_destroy( &handle->condition );
1470 delete handle;
1471 stream_.apiHandle = 0;
1472
1473 stream_.mode = UNINITIALIZED;
1474 stream_.state = STREAM_CLOSED;
1475 }
1476
startStream(void)1477 void RtApiCore :: startStream( void )
1478 {
1479 verifyStream();
1480 if ( stream_.state == STREAM_RUNNING ) {
1481 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1482 error( RtAudioError::WARNING );
1483 return;
1484 }
1485
1486 OSStatus result = noErr;
1487 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1489
1490 result = AudioDeviceStart( handle->id[0], callbackHandler );
1491 if ( result != noErr ) {
1492 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1493 errorText_ = errorStream_.str();
1494 goto unlock;
1495 }
1496 }
1497
1498 if ( stream_.mode == INPUT ||
1499 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1500
1501 result = AudioDeviceStart( handle->id[1], callbackHandler );
1502 if ( result != noErr ) {
1503 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1504 errorText_ = errorStream_.str();
1505 goto unlock;
1506 }
1507 }
1508
1509 handle->drainCounter = 0;
1510 handle->internalDrain = false;
1511 stream_.state = STREAM_RUNNING;
1512
1513 unlock:
1514 if ( result == noErr ) return;
1515 error( RtAudioError::SYSTEM_ERROR );
1516 }
1517
// Stop the running stream, draining pending output first. Warns (no-op) if
// already stopped; raises SYSTEM_ERROR if AudioDeviceStop() fails.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means a normal stop: request a drain (2) and wait
    // for callbackEvent() to signal the condition once the output has been
    // played out. abortStream() pre-sets drainCounter to 2 to skip this wait.
    // NOTE(review): POSIX requires stream_.mutex to be locked by this thread
    // when calling pthread_cond_wait(); no lock is visibly taken in this
    // function — confirm the locking contract with callers.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // The input side has its own callback only when it is a separate device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1560
abortStream(void)1561 void RtApiCore :: abortStream( void )
1562 {
1563 verifyStream();
1564 if ( stream_.state == STREAM_STOPPED ) {
1565 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1566 error( RtAudioError::WARNING );
1567 return;
1568 }
1569
1570 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1571 handle->drainCounter = 2;
1572
1573 stopStream();
1574 }
1575
1576 // This function will be called by a spawned thread when the user
1577 // callback function signals that the stream should be stopped or
1578 // aborted. It is better to handle it this way because the
1579 // callbackEvent() function probably should return before the AudioDeviceStop()
1580 // function is called.
coreStopStream(void * ptr)1581 static void *coreStopStream( void *ptr )
1582 {
1583 CallbackInfo *info = (CallbackInfo *) ptr;
1584 RtApiCore *object = (RtApiCore *) info->object;
1585
1586 object->stopStream();
1587 pthread_exit( NULL );
1588 }
1589
// CoreAudio render/capture handler for one device.  Invokes the user
// callback, then copies/converts data between the user buffers and the
// CoreAudio AudioBufferList(s).  In DUPLEX mode with two distinct
// devices this function is entered once per device, so deviceId is
// used to decide which half (output vs. input) to service.
// Returns SUCCESS (stream continues) or FAILURE.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  // Stopped/stopping streams are serviced as a silent no-op.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // drainCounter is incremented once per callback while draining (see
  // below); > 3 means the zero-filled buffers have flushed through.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain initiated from the user callback: stop from a helper
      // thread since stopStream() must not run inside this callback.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      // Wake the thread blocked in stopStream() on this condition.
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any xrun flags set by the xrun listeners.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a graceful stop: drain remaining output.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  // ----- Output (render) side -----
  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One mono CoreAudio stream per user channel: copy each
        // channel's contiguous block into its own stream buffer.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the per-channel stride in the source buffer:
        // 1 for interleaved, bufferSize for planar data.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE: this loop index shadows the outer stream index `i`;
          // the outer `i` is not referenced inside, so this is benign.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // ----- Input (capture) side -----
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        // Mirror of the output de-interleave logic above, with the
        // copy direction reversed (device streams -> user/device buffer).
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // NOTE: this loop index shadows the outer stream index `i`;
          // the outer `i` is not referenced inside, so this is benign.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  return SUCCESS;
}
1849
getErrorCode(OSStatus code)1850 const char* RtApiCore :: getErrorCode( OSStatus code )
1851 {
1852 switch( code ) {
1853
1854 case kAudioHardwareNotRunningError:
1855 return "kAudioHardwareNotRunningError";
1856
1857 case kAudioHardwareUnspecifiedError:
1858 return "kAudioHardwareUnspecifiedError";
1859
1860 case kAudioHardwareUnknownPropertyError:
1861 return "kAudioHardwareUnknownPropertyError";
1862
1863 case kAudioHardwareBadPropertySizeError:
1864 return "kAudioHardwareBadPropertySizeError";
1865
1866 case kAudioHardwareIllegalOperationError:
1867 return "kAudioHardwareIllegalOperationError";
1868
1869 case kAudioHardwareBadObjectError:
1870 return "kAudioHardwareBadObjectError";
1871
1872 case kAudioHardwareBadDeviceError:
1873 return "kAudioHardwareBadDeviceError";
1874
1875 case kAudioHardwareBadStreamError:
1876 return "kAudioHardwareBadStreamError";
1877
1878 case kAudioHardwareUnsupportedOperationError:
1879 return "kAudioHardwareUnsupportedOperationError";
1880
1881 case kAudioDeviceUnsupportedFormatError:
1882 return "kAudioDeviceUnsupportedFormatError";
1883
1884 case kAudioDevicePermissionsError:
1885 return "kAudioDevicePermissionsError";
1886
1887 default:
1888 return "CoreAudio unknown error";
1889 }
1890 }
1891
1892 //******************** End of __MACOSX_CORE__ *********************//
1893 #endif
1894
1895 #if defined(__UNIX_JACK__)
1896
1897 // JACK is a low-latency audio server, originally written for the
1898 // GNU/Linux operating system and now also ported to OS-X. It can
1899 // connect a number of different applications to an audio device, as
1900 // well as allowing them to share audio between themselves.
1901 //
1902 // When using JACK with RtAudio, "devices" refer to JACK clients that
1903 // have ports connected to the server. The JACK server is typically
1904 // started in a terminal as follows:
1905 //
1906 // .jackd -d alsa -d hw:0
1907 //
1908 // or through an interface program such as qjackctl. Many of the
1909 // parameters normally set for a stream are fixed by the JACK server
1910 // and can be specified when the JACK server is started. In
1911 // particular,
1912 //
1913 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1914 //
1915 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1916 // frames, and number of buffers = 4. Once the server is running, it
1917 // is not possible to override these values. If the values are not
1918 // specified in the command-line, the JACK server uses default values.
1919 //
1920 // The JACK server does not have to be running when an instance of
1921 // RtApiJack is created, though the function getDeviceCount() will
1922 // report 0 devices found until JACK has been started. When no
1923 // devices are available (i.e., the JACK server is not running), a
1924 // stream cannot be opened.
1925
1926 #include <jack/jack.h>
1927 #include <unistd.h>
1928 #include <cstdio>
1929
// A structure to hold various information related to the Jack API
// implementation.
struct JackHandle {
  jack_client_t *client;      // connection to the JACK server (shared by playback and capture)
  jack_port_t **ports[2];     // registered port arrays: [0] = playback (OUTPUT), [1] = capture (INPUT)
  std::string deviceName[2];  // JACK client name used as the "device" for each direction
  bool xrun[2];               // over/underrun flags, set by the xrun callback, cleared by callbackEvent()
  pthread_cond_t condition;   // signaled by callbackEvent() when an external stopStream() has drained
  int drainCounter;           // Tracks callback counts when draining
  bool internalDrain;         // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
1944
jackSilentError(const char *)1945 static void jackSilentError( const char * ) {};
1946
RtApiJack()1947 RtApiJack :: RtApiJack()
1948 {
1949 // Nothing to do here.
1950 #if !defined(__RTAUDIO_DEBUG__)
1951 // Turn off Jack's internal error reporting.
1952 jack_set_error_function( &jackSilentError );
1953 #endif
1954 }
1955
~RtApiJack()1956 RtApiJack :: ~RtApiJack()
1957 {
1958 if ( stream_.state != STREAM_CLOSED ) closeStream();
1959 }
1960
getDeviceCount(void)1961 unsigned int RtApiJack :: getDeviceCount( void )
1962 {
1963 // See if we can become a jack client.
1964 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1965 jack_status_t *status = NULL;
1966 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1967 if ( client == 0 ) return 0;
1968
1969 const char **ports;
1970 std::string port, previousPort;
1971 unsigned int nChannels = 0, nDevices = 0;
1972 ports = jack_get_ports( client, NULL, NULL, 0 );
1973 if ( ports ) {
1974 // Parse the port names up to the first colon (:).
1975 size_t iColon = 0;
1976 do {
1977 port = (char *) ports[ nChannels ];
1978 iColon = port.find(":");
1979 if ( iColon != std::string::npos ) {
1980 port = port.substr( 0, iColon + 1 );
1981 if ( port != previousPort ) {
1982 nDevices++;
1983 previousPort = port;
1984 }
1985 }
1986 } while ( ports[++nChannels] );
1987 free( ports );
1988 }
1989
1990 jack_client_close( client );
1991 return nDevices;
1992 }
1993
// Query the JACK server for information about device `device` (a JACK
// client with ports).  On failure, info.probed remains false and a
// warning (or INVALID_USE for a bad device index) is raised.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Open a temporary client connection just for this query.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    // Each distinct "client" prefix is one device; the Nth distinct
    // prefix encountered is device index N.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // JACK fixes the rate server-wide, so it is the only supported one.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2086
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2087 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2088 {
2089 CallbackInfo *info = (CallbackInfo *) infoPointer;
2090
2091 RtApiJack *object = (RtApiJack *) info->object;
2092 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2093
2094 return 0;
2095 }
2096
2097 // This function will be called by a spawned thread when the Jack
2098 // server signals that it is shutting down. It is necessary to handle
2099 // it this way because the jackShutdown() function must return before
2100 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2101 static void *jackCloseStream( void *ptr )
2102 {
2103 CallbackInfo *info = (CallbackInfo *) ptr;
2104 RtApiJack *object = (RtApiJack *) info->object;
2105
2106 object->closeStream();
2107
2108 pthread_exit( NULL );
2109 }
jackShutdown(void * infoPointer)2110 static void jackShutdown( void *infoPointer )
2111 {
2112 CallbackInfo *info = (CallbackInfo *) infoPointer;
2113 RtApiJack *object = (RtApiJack *) info->object;
2114
2115 // Check current stream state. If stopped, then we'll assume this
2116 // was called as a result of a call to RtApiJack::stopStream (the
2117 // deactivation of a client handle causes this function to be called).
2118 // If not, we'll assume the Jack server is shutting down or some
2119 // other problem occurred and we should close the stream.
2120 if ( object->isStreamRunning() == false ) return;
2121
2122 ThreadHandle threadId;
2123 pthread_create( &threadId, NULL, jackCloseStream, info );
2124 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2125 }
2126
jackXrun(void * infoPointer)2127 static int jackXrun( void *infoPointer )
2128 {
2129 JackHandle *handle = (JackHandle *) infoPointer;
2130
2131 if ( handle->ports[0] ) handle->xrun[0] = true;
2132 if ( handle->ports[1] ) handle->xrun[1] = true;
2133
2134 return 0;
2135 }
2136
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2137 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2138 unsigned int firstChannel, unsigned int sampleRate,
2139 RtAudioFormat format, unsigned int *bufferSize,
2140 RtAudio::StreamOptions *options )
2141 {
2142 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2143
2144 // Look for jack server and try to become a client (only do once per stream).
2145 jack_client_t *client = 0;
2146 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2147 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2148 jack_status_t *status = NULL;
2149 if ( options && !options->streamName.empty() )
2150 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2151 else
2152 client = jack_client_open( "RtApiJack", jackoptions, status );
2153 if ( client == 0 ) {
2154 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2155 error( RtAudioError::WARNING );
2156 return FAILURE;
2157 }
2158 }
2159 else {
2160 // The handle must have been created on an earlier pass.
2161 client = handle->client;
2162 }
2163
2164 const char **ports;
2165 std::string port, previousPort, deviceName;
2166 unsigned int nPorts = 0, nDevices = 0;
2167 ports = jack_get_ports( client, NULL, NULL, 0 );
2168 if ( ports ) {
2169 // Parse the port names up to the first colon (:).
2170 size_t iColon = 0;
2171 do {
2172 port = (char *) ports[ nPorts ];
2173 iColon = port.find(":");
2174 if ( iColon != std::string::npos ) {
2175 port = port.substr( 0, iColon );
2176 if ( port != previousPort ) {
2177 if ( nDevices == device ) deviceName = port;
2178 nDevices++;
2179 previousPort = port;
2180 }
2181 }
2182 } while ( ports[++nPorts] );
2183 free( ports );
2184 }
2185
2186 if ( device >= nDevices ) {
2187 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2188 return FAILURE;
2189 }
2190
2191 // Count the available ports containing the client name as device
2192 // channels. Jack "input ports" equal RtAudio output channels.
2193 unsigned int nChannels = 0;
2194 unsigned long flag = JackPortIsInput;
2195 if ( mode == INPUT ) flag = JackPortIsOutput;
2196 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2197 if ( ports ) {
2198 while ( ports[ nChannels ] ) nChannels++;
2199 free( ports );
2200 }
2201
2202 // Compare the jack ports for specified client to the requested number of channels.
2203 if ( nChannels < (channels + firstChannel) ) {
2204 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2205 errorText_ = errorStream_.str();
2206 return FAILURE;
2207 }
2208
2209 // Check the jack server sample rate.
2210 unsigned int jackRate = jack_get_sample_rate( client );
2211 if ( sampleRate != jackRate ) {
2212 jack_client_close( client );
2213 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2214 errorText_ = errorStream_.str();
2215 return FAILURE;
2216 }
2217 stream_.sampleRate = jackRate;
2218
2219 // Get the latency of the JACK port.
2220 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2221 if ( ports[ firstChannel ] ) {
2222 // Added by Ge Wang
2223 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2224 // the range (usually the min and max are equal)
2225 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2226 // get the latency range
2227 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2228 // be optimistic, use the min!
2229 stream_.latency[mode] = latrange.min;
2230 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2231 }
2232 free( ports );
2233
2234 // The jack server always uses 32-bit floating-point data.
2235 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2236 stream_.userFormat = format;
2237
2238 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2239 else stream_.userInterleaved = true;
2240
2241 // Jack always uses non-interleaved buffers.
2242 stream_.deviceInterleaved[mode] = false;
2243
2244 // Jack always provides host byte-ordered data.
2245 stream_.doByteSwap[mode] = false;
2246
2247 // Get the buffer size. The buffer size and number of buffers
2248 // (periods) is set when the jack server is started.
2249 stream_.bufferSize = (int) jack_get_buffer_size( client );
2250 *bufferSize = stream_.bufferSize;
2251
2252 stream_.nDeviceChannels[mode] = channels;
2253 stream_.nUserChannels[mode] = channels;
2254
2255 // Set flags for buffer conversion.
2256 stream_.doConvertBuffer[mode] = false;
2257 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2258 stream_.doConvertBuffer[mode] = true;
2259 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2260 stream_.nUserChannels[mode] > 1 )
2261 stream_.doConvertBuffer[mode] = true;
2262
2263 // Allocate our JackHandle structure for the stream.
2264 if ( handle == 0 ) {
2265 try {
2266 handle = new JackHandle;
2267 }
2268 catch ( std::bad_alloc& ) {
2269 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2270 goto error;
2271 }
2272
2273 if ( pthread_cond_init(&handle->condition, NULL) ) {
2274 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2275 goto error;
2276 }
2277 stream_.apiHandle = (void *) handle;
2278 handle->client = client;
2279 }
2280 handle->deviceName[mode] = deviceName;
2281
2282 // Allocate necessary internal buffers.
2283 unsigned long bufferBytes;
2284 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2285 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2286 if ( stream_.userBuffer[mode] == NULL ) {
2287 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2288 goto error;
2289 }
2290
2291 if ( stream_.doConvertBuffer[mode] ) {
2292
2293 bool makeBuffer = true;
2294 if ( mode == OUTPUT )
2295 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2296 else { // mode == INPUT
2297 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2298 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2299 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2300 if ( bufferBytes < bytesOut ) makeBuffer = false;
2301 }
2302 }
2303
2304 if ( makeBuffer ) {
2305 bufferBytes *= *bufferSize;
2306 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2307 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2308 if ( stream_.deviceBuffer == NULL ) {
2309 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2310 goto error;
2311 }
2312 }
2313 }
2314
2315 // Allocate memory for the Jack ports (channels) identifiers.
2316 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2317 if ( handle->ports[mode] == NULL ) {
2318 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2319 goto error;
2320 }
2321
2322 stream_.device[mode] = device;
2323 stream_.channelOffset[mode] = firstChannel;
2324 stream_.state = STREAM_STOPPED;
2325 stream_.callbackInfo.object = (void *) this;
2326
2327 if ( stream_.mode == OUTPUT && mode == INPUT )
2328 // We had already set up the stream for output.
2329 stream_.mode = DUPLEX;
2330 else {
2331 stream_.mode = mode;
2332 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2333 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2334 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2335 }
2336
2337 // Register our ports.
2338 char label[64];
2339 if ( mode == OUTPUT ) {
2340 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2341 snprintf( label, 64, "outport %d", i );
2342 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2343 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2344 }
2345 }
2346 else {
2347 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2348 snprintf( label, 64, "inport %d", i );
2349 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2350 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2351 }
2352 }
2353
2354 // Setup the buffer conversion information structure. We don't use
2355 // buffers to do channel offsets, so we override that parameter
2356 // here.
2357 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2358
2359 return SUCCESS;
2360
2361 error:
2362 if ( handle ) {
2363 pthread_cond_destroy( &handle->condition );
2364 jack_client_close( handle->client );
2365
2366 if ( handle->ports[0] ) free( handle->ports[0] );
2367 if ( handle->ports[1] ) free( handle->ports[1] );
2368
2369 delete handle;
2370 stream_.apiHandle = 0;
2371 }
2372
2373 for ( int i=0; i<2; i++ ) {
2374 if ( stream_.userBuffer[i] ) {
2375 free( stream_.userBuffer[i] );
2376 stream_.userBuffer[i] = 0;
2377 }
2378 }
2379
2380 if ( stream_.deviceBuffer ) {
2381 free( stream_.deviceBuffer );
2382 stream_.deviceBuffer = 0;
2383 }
2384
2385 return FAILURE;
2386 }
2387
closeStream(void)2388 void RtApiJack :: closeStream( void )
2389 {
2390 if ( stream_.state == STREAM_CLOSED ) {
2391 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2392 error( RtAudioError::WARNING );
2393 return;
2394 }
2395
2396 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2397 if ( handle ) {
2398
2399 if ( stream_.state == STREAM_RUNNING )
2400 jack_deactivate( handle->client );
2401
2402 jack_client_close( handle->client );
2403 }
2404
2405 if ( handle ) {
2406 if ( handle->ports[0] ) free( handle->ports[0] );
2407 if ( handle->ports[1] ) free( handle->ports[1] );
2408 pthread_cond_destroy( &handle->condition );
2409 delete handle;
2410 stream_.apiHandle = 0;
2411 }
2412
2413 for ( int i=0; i<2; i++ ) {
2414 if ( stream_.userBuffer[i] ) {
2415 free( stream_.userBuffer[i] );
2416 stream_.userBuffer[i] = 0;
2417 }
2418 }
2419
2420 if ( stream_.deviceBuffer ) {
2421 free( stream_.deviceBuffer );
2422 stream_.deviceBuffer = 0;
2423 }
2424
2425 stream_.mode = UNINITIALIZED;
2426 stream_.state = STREAM_CLOSED;
2427 }
2428
// Activate the JACK client and connect our registered ports to the
// target device's ports, honoring the channel offset chosen at open
// time.  Raises SYSTEM_ERROR (via the unlock path) on any failure.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1;  // assume failure until a connection succeeds
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1;
    // For capture we connect the device's output ports to our input ports.
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset drain state so a previous stop/abort doesn't leak into this run.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2502
stopStream(void)2503 void RtApiJack :: stopStream( void )
2504 {
2505 verifyStream();
2506 if ( stream_.state == STREAM_STOPPED ) {
2507 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2508 error( RtAudioError::WARNING );
2509 return;
2510 }
2511
2512 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2513 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2514
2515 if ( handle->drainCounter == 0 ) {
2516 handle->drainCounter = 2;
2517 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2518 }
2519 }
2520
2521 jack_deactivate( handle->client );
2522 stream_.state = STREAM_STOPPED;
2523 }
2524
abortStream(void)2525 void RtApiJack :: abortStream( void )
2526 {
2527 verifyStream();
2528 if ( stream_.state == STREAM_STOPPED ) {
2529 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2530 error( RtAudioError::WARNING );
2531 return;
2532 }
2533
2534 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2535 handle->drainCounter = 2;
2536
2537 stopStream();
2538 }
2539
2540 // This function will be called by a spawned thread when the user
2541 // callback function signals that the stream should be stopped or
2542 // aborted. It is necessary to handle it this way because the
2543 // callbackEvent() function must return before the jack_deactivate()
2544 // function will return.
jackStopStream(void * ptr)2545 static void *jackStopStream( void *ptr )
2546 {
2547 CallbackInfo *info = (CallbackInfo *) ptr;
2548 RtApiJack *object = (RtApiJack *) info->object;
2549
2550 object->stopStream();
2551 pthread_exit( NULL );
2552 }
2553
callbackEvent(unsigned long nframes)2554 bool RtApiJack :: callbackEvent( unsigned long nframes )
2555 {
2556 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2557 if ( stream_.state == STREAM_CLOSED ) {
2558 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2559 error( RtAudioError::WARNING );
2560 return FAILURE;
2561 }
2562 if ( stream_.bufferSize != nframes ) {
2563 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2564 error( RtAudioError::WARNING );
2565 return FAILURE;
2566 }
2567
2568 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2569 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2570
2571 // Check if we were draining the stream and signal is finished.
2572 if ( handle->drainCounter > 3 ) {
2573 ThreadHandle threadId;
2574
2575 stream_.state = STREAM_STOPPING;
2576 if ( handle->internalDrain == true )
2577 pthread_create( &threadId, NULL, jackStopStream, info );
2578 else
2579 pthread_cond_signal( &handle->condition );
2580 return SUCCESS;
2581 }
2582
2583 // Invoke user callback first, to get fresh output data.
2584 if ( handle->drainCounter == 0 ) {
2585 RtAudioCallback callback = (RtAudioCallback) info->callback;
2586 double streamTime = getStreamTime();
2587 RtAudioStreamStatus status = 0;
2588 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2589 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2590 handle->xrun[0] = false;
2591 }
2592 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2593 status |= RTAUDIO_INPUT_OVERFLOW;
2594 handle->xrun[1] = false;
2595 }
2596 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2597 stream_.bufferSize, streamTime, status, info->userData );
2598 if ( cbReturnValue == 2 ) {
2599 stream_.state = STREAM_STOPPING;
2600 handle->drainCounter = 2;
2601 ThreadHandle id;
2602 pthread_create( &id, NULL, jackStopStream, info );
2603 return SUCCESS;
2604 }
2605 else if ( cbReturnValue == 1 ) {
2606 handle->drainCounter = 1;
2607 handle->internalDrain = true;
2608 }
2609 }
2610
2611 jack_default_audio_sample_t *jackbuffer;
2612 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2613 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2614
2615 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2616
2617 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2618 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2619 memset( jackbuffer, 0, bufferBytes );
2620 }
2621
2622 }
2623 else if ( stream_.doConvertBuffer[0] ) {
2624
2625 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2626
2627 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2628 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2629 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2630 }
2631 }
2632 else { // no buffer conversion
2633 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2636 }
2637 }
2638 }
2639
2640 // Don't bother draining input
2641 if ( handle->drainCounter ) {
2642 handle->drainCounter++;
2643 goto unlock;
2644 }
2645
2646 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2647
2648 if ( stream_.doConvertBuffer[1] ) {
2649 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2650 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2651 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2652 }
2653 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2654 }
2655 else { // no buffer conversion
2656 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2657 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2658 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2659 }
2660 }
2661 }
2662
2663 unlock:
2664 RtApi::tickStreamTime();
2665 return SUCCESS;
2666 }
2667 //******************** End of __UNIX_JACK__ *********************//
2668 #endif
2669
2670 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2671
2672 // The ASIO API is designed around a callback scheme, so this
2673 // implementation is similar to that used for OS-X CoreAudio and Linux
2674 // Jack. The primary constraint with ASIO is that it only allows
2675 // access to a single driver at a time. Thus, it is not possible to
2676 // have more than one simultaneous RtAudio stream.
2677 //
2678 // This implementation also requires a number of external ASIO files
2679 // and a few global variables. The ASIO callback scheme does not
2680 // allow for the passing of user data, so we must create a global
2681 // pointer to our callbackInfo structure.
2682 //
2683 // On unix systems, we make use of a pthread condition variable.
2684 // Since there is no equivalent in Windows, I hacked something based
2685 // on information found in
2686 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2687
2688 #include "asiosys.h"
2689 #include "asio.h"
2690 #include "iasiothiscallresolver.h"
2691 #include "asiodrivers.h"
2692 #include <cmath>
2693
// The ASIO callback scheme provides no way to pass user data, so the state
// shared with the static callbacks lives in these file-scope globals (ASIO
// only allows one active driver/stream at a time anyway).
static AsioDrivers drivers;             // driver enumeration/loading helper
static ASIOCallbacks asioCallbacks;     // callback table handed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;       // info for the currently loaded driver
static CallbackInfo *asioCallbackInfo;  // points at the open stream's callback info
static bool asioXRun;                   // over/underrun flag raised by the driver
2699
2700 struct AsioHandle {
2701 int drainCounter; // Tracks callback counts when draining
2702 bool internalDrain; // Indicates if stop is initiated from callback or not.
2703 ASIOBufferInfo *bufferInfos;
2704 HANDLE condition;
2705
AsioHandleAsioHandle2706 AsioHandle()
2707 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2708 };
2709
2710 // Function declarations (definitions at end of section)
2711 static const char* getAsioErrorString( ASIOError result );
2712 static void sampleRateChanged( ASIOSampleRate sRate );
2713 static long asioMessages( long selector, long value, void* message, double* opt );
2714
RtApiAsio()2715 RtApiAsio :: RtApiAsio()
2716 {
2717 // ASIO cannot run on a multi-threaded appartment. You can call
2718 // CoInitialize beforehand, but it must be for appartment threading
2719 // (in which case, CoInitilialize will return S_FALSE here).
2720 coInitialized_ = false;
2721 HRESULT hr = CoInitialize( NULL );
2722 if ( FAILED(hr) ) {
2723 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2724 error( RtAudioError::WARNING );
2725 }
2726 coInitialized_ = true;
2727
2728 drivers.removeCurrentDriver();
2729 driverInfo.asioVersion = 2;
2730
2731 // See note in DirectSound implementation about GetDesktopWindow().
2732 driverInfo.sysRef = GetForegroundWindow();
2733 }
2734
~RtApiAsio()2735 RtApiAsio :: ~RtApiAsio()
2736 {
2737 if ( stream_.state != STREAM_CLOSED ) closeStream();
2738 if ( coInitialized_ ) CoUninitialize();
2739 }
2740
getDeviceCount(void)2741 unsigned int RtApiAsio :: getDeviceCount( void )
2742 {
2743 return (unsigned int) drivers.asioGetNumDev();
2744 }
2745
getDeviceInfo(unsigned int device)2746 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2747 {
2748 RtAudio::DeviceInfo info;
2749 info.probed = false;
2750
2751 // Get device ID
2752 unsigned int nDevices = getDeviceCount();
2753 if ( nDevices == 0 ) {
2754 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2755 error( RtAudioError::INVALID_USE );
2756 return info;
2757 }
2758
2759 if ( device >= nDevices ) {
2760 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2761 error( RtAudioError::INVALID_USE );
2762 return info;
2763 }
2764
2765 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2766 if ( stream_.state != STREAM_CLOSED ) {
2767 if ( device >= devices_.size() ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2769 error( RtAudioError::WARNING );
2770 return info;
2771 }
2772 return devices_[ device ];
2773 }
2774
2775 char driverName[32];
2776 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2777 if ( result != ASE_OK ) {
2778 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2779 errorText_ = errorStream_.str();
2780 error( RtAudioError::WARNING );
2781 return info;
2782 }
2783
2784 info.name = driverName;
2785
2786 if ( !drivers.loadDriver( driverName ) ) {
2787 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2788 errorText_ = errorStream_.str();
2789 error( RtAudioError::WARNING );
2790 return info;
2791 }
2792
2793 result = ASIOInit( &driverInfo );
2794 if ( result != ASE_OK ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2798 return info;
2799 }
2800
2801 // Determine the device channel information.
2802 long inputChannels, outputChannels;
2803 result = ASIOGetChannels( &inputChannels, &outputChannels );
2804 if ( result != ASE_OK ) {
2805 drivers.removeCurrentDriver();
2806 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2807 errorText_ = errorStream_.str();
2808 error( RtAudioError::WARNING );
2809 return info;
2810 }
2811
2812 info.outputChannels = outputChannels;
2813 info.inputChannels = inputChannels;
2814 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2815 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2816
2817 // Determine the supported sample rates.
2818 info.sampleRates.clear();
2819 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2820 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2821 if ( result == ASE_OK ) {
2822 info.sampleRates.push_back( SAMPLE_RATES[i] );
2823
2824 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2825 info.preferredSampleRate = SAMPLE_RATES[i];
2826 }
2827 }
2828
2829 // Determine supported data types ... just check first channel and assume rest are the same.
2830 ASIOChannelInfo channelInfo;
2831 channelInfo.channel = 0;
2832 channelInfo.isInput = true;
2833 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2834 result = ASIOGetChannelInfo( &channelInfo );
2835 if ( result != ASE_OK ) {
2836 drivers.removeCurrentDriver();
2837 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2838 errorText_ = errorStream_.str();
2839 error( RtAudioError::WARNING );
2840 return info;
2841 }
2842
2843 info.nativeFormats = 0;
2844 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2845 info.nativeFormats |= RTAUDIO_SINT16;
2846 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2847 info.nativeFormats |= RTAUDIO_SINT32;
2848 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2849 info.nativeFormats |= RTAUDIO_FLOAT32;
2850 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2851 info.nativeFormats |= RTAUDIO_FLOAT64;
2852 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2853 info.nativeFormats |= RTAUDIO_SINT24;
2854
2855 if ( info.outputChannels > 0 )
2856 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2857 if ( info.inputChannels > 0 )
2858 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2859
2860 info.probed = true;
2861 drivers.removeCurrentDriver();
2862 return info;
2863 }
2864
bufferSwitch(long index,ASIOBool)2865 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2866 {
2867 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2868 object->callbackEvent( index );
2869 }
2870
saveDeviceInfo(void)2871 void RtApiAsio :: saveDeviceInfo( void )
2872 {
2873 devices_.clear();
2874
2875 unsigned int nDevices = getDeviceCount();
2876 devices_.resize( nDevices );
2877 for ( unsigned int i=0; i<nDevices; i++ )
2878 devices_[i] = getDeviceInfo( i );
2879 }
2880
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2881 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2882 unsigned int firstChannel, unsigned int sampleRate,
2883 RtAudioFormat format, unsigned int *bufferSize,
2884 RtAudio::StreamOptions *options )
2885 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2886
2887 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2888
2889 // For ASIO, a duplex stream MUST use the same driver.
2890 if ( isDuplexInput && stream_.device[0] != device ) {
2891 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2892 return FAILURE;
2893 }
2894
2895 char driverName[32];
2896 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2897 if ( result != ASE_OK ) {
2898 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2899 errorText_ = errorStream_.str();
2900 return FAILURE;
2901 }
2902
2903 // Only load the driver once for duplex stream.
2904 if ( !isDuplexInput ) {
2905 // The getDeviceInfo() function will not work when a stream is open
2906 // because ASIO does not allow multiple devices to run at the same
2907 // time. Thus, we'll probe the system before opening a stream and
2908 // save the results for use by getDeviceInfo().
2909 this->saveDeviceInfo();
2910
2911 if ( !drivers.loadDriver( driverName ) ) {
2912 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2913 errorText_ = errorStream_.str();
2914 return FAILURE;
2915 }
2916
2917 result = ASIOInit( &driverInfo );
2918 if ( result != ASE_OK ) {
2919 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2920 errorText_ = errorStream_.str();
2921 return FAILURE;
2922 }
2923 }
2924
2925 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2926 bool buffersAllocated = false;
2927 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2928 unsigned int nChannels;
2929
2930
2931 // Check the device channel count.
2932 long inputChannels, outputChannels;
2933 result = ASIOGetChannels( &inputChannels, &outputChannels );
2934 if ( result != ASE_OK ) {
2935 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2936 errorText_ = errorStream_.str();
2937 goto error;
2938 }
2939
2940 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2941 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2942 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2943 errorText_ = errorStream_.str();
2944 goto error;
2945 }
2946 stream_.nDeviceChannels[mode] = channels;
2947 stream_.nUserChannels[mode] = channels;
2948 stream_.channelOffset[mode] = firstChannel;
2949
2950 // Verify the sample rate is supported.
2951 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2952 if ( result != ASE_OK ) {
2953 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2954 errorText_ = errorStream_.str();
2955 goto error;
2956 }
2957
2958 // Get the current sample rate
2959 ASIOSampleRate currentRate;
2960 result = ASIOGetSampleRate( ¤tRate );
2961 if ( result != ASE_OK ) {
2962 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2963 errorText_ = errorStream_.str();
2964 goto error;
2965 }
2966
2967 // Set the sample rate only if necessary
2968 if ( currentRate != sampleRate ) {
2969 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2970 if ( result != ASE_OK ) {
2971 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2972 errorText_ = errorStream_.str();
2973 goto error;
2974 }
2975 }
2976
2977 // Determine the driver data type.
2978 ASIOChannelInfo channelInfo;
2979 channelInfo.channel = 0;
2980 if ( mode == OUTPUT ) channelInfo.isInput = false;
2981 else channelInfo.isInput = true;
2982 result = ASIOGetChannelInfo( &channelInfo );
2983 if ( result != ASE_OK ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2985 errorText_ = errorStream_.str();
2986 goto error;
2987 }
2988
2989 // Assuming WINDOWS host is always little-endian.
2990 stream_.doByteSwap[mode] = false;
2991 stream_.userFormat = format;
2992 stream_.deviceFormat[mode] = 0;
2993 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2994 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2995 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2996 }
2997 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2998 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2999 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3000 }
3001 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3003 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3004 }
3005 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3007 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3008 }
3009 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3011 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3012 }
3013
3014 if ( stream_.deviceFormat[mode] == 0 ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3016 errorText_ = errorStream_.str();
3017 goto error;
3018 }
3019
3020 // Set the buffer size. For a duplex stream, this will end up
3021 // setting the buffer size based on the input constraints, which
3022 // should be ok.
3023 long minSize, maxSize, preferSize, granularity;
3024 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3025 if ( result != ASE_OK ) {
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3027 errorText_ = errorStream_.str();
3028 goto error;
3029 }
3030
3031 if ( isDuplexInput ) {
3032 // When this is the duplex input (output was opened before), then we have to use the same
3033 // buffersize as the output, because it might use the preferred buffer size, which most
3034 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3035 // So instead of throwing an error, make them equal. The caller uses the reference
3036 // to the "bufferSize" param as usual to set up processing buffers.
3037
3038 *bufferSize = stream_.bufferSize;
3039
3040 } else {
3041 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3042 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3043 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3044 else if ( granularity == -1 ) {
3045 // Make sure bufferSize is a power of two.
3046 int log2_of_min_size = 0;
3047 int log2_of_max_size = 0;
3048
3049 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3050 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3051 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3052 }
3053
3054 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3055 int min_delta_num = log2_of_min_size;
3056
3057 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3058 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3059 if (current_delta < min_delta) {
3060 min_delta = current_delta;
3061 min_delta_num = i;
3062 }
3063 }
3064
3065 *bufferSize = ( (unsigned int)1 << min_delta_num );
3066 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3067 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3068 }
3069 else if ( granularity != 0 ) {
3070 // Set to an even multiple of granularity, rounding up.
3071 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3072 }
3073 }
3074
3075 /*
3076 // we don't use it anymore, see above!
3077 // Just left it here for the case...
3078 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3079 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3080 goto error;
3081 }
3082 */
3083
3084 stream_.bufferSize = *bufferSize;
3085 stream_.nBuffers = 2;
3086
3087 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3088 else stream_.userInterleaved = true;
3089
3090 // ASIO always uses non-interleaved buffers.
3091 stream_.deviceInterleaved[mode] = false;
3092
3093 // Allocate, if necessary, our AsioHandle structure for the stream.
3094 if ( handle == 0 ) {
3095 try {
3096 handle = new AsioHandle;
3097 }
3098 catch ( std::bad_alloc& ) {
3099 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3100 goto error;
3101 }
3102 handle->bufferInfos = 0;
3103
3104 // Create a manual-reset event.
3105 handle->condition = CreateEvent( NULL, // no security
3106 TRUE, // manual-reset
3107 FALSE, // non-signaled initially
3108 NULL ); // unnamed
3109 stream_.apiHandle = (void *) handle;
3110 }
3111
3112 // Create the ASIO internal buffers. Since RtAudio sets up input
3113 // and output separately, we'll have to dispose of previously
3114 // created output buffers for a duplex stream.
3115 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3116 ASIODisposeBuffers();
3117 if ( handle->bufferInfos ) free( handle->bufferInfos );
3118 }
3119
3120 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3121 unsigned int i;
3122 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3123 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3124 if ( handle->bufferInfos == NULL ) {
3125 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3126 errorText_ = errorStream_.str();
3127 goto error;
3128 }
3129
3130 ASIOBufferInfo *infos;
3131 infos = handle->bufferInfos;
3132 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3133 infos->isInput = ASIOFalse;
3134 infos->channelNum = i + stream_.channelOffset[0];
3135 infos->buffers[0] = infos->buffers[1] = 0;
3136 }
3137 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3138 infos->isInput = ASIOTrue;
3139 infos->channelNum = i + stream_.channelOffset[1];
3140 infos->buffers[0] = infos->buffers[1] = 0;
3141 }
3142
3143 // prepare for callbacks
3144 stream_.sampleRate = sampleRate;
3145 stream_.device[mode] = device;
3146 stream_.mode = isDuplexInput ? DUPLEX : mode;
3147
3148 // store this class instance before registering callbacks, that are going to use it
3149 asioCallbackInfo = &stream_.callbackInfo;
3150 stream_.callbackInfo.object = (void *) this;
3151
3152 // Set up the ASIO callback structure and create the ASIO data buffers.
3153 asioCallbacks.bufferSwitch = &bufferSwitch;
3154 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3155 asioCallbacks.asioMessage = &asioMessages;
3156 asioCallbacks.bufferSwitchTimeInfo = NULL;
3157 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3158 if ( result != ASE_OK ) {
3159 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3160 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3161 // in that case, let's be naïve and try that instead
3162 *bufferSize = preferSize;
3163 stream_.bufferSize = *bufferSize;
3164 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3165 }
3166
3167 if ( result != ASE_OK ) {
3168 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3169 errorText_ = errorStream_.str();
3170 goto error;
3171 }
3172 buffersAllocated = true;
3173 stream_.state = STREAM_STOPPED;
3174
3175 // Set flags for buffer conversion.
3176 stream_.doConvertBuffer[mode] = false;
3177 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3178 stream_.doConvertBuffer[mode] = true;
3179 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3180 stream_.nUserChannels[mode] > 1 )
3181 stream_.doConvertBuffer[mode] = true;
3182
3183 // Allocate necessary internal buffers
3184 unsigned long bufferBytes;
3185 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3186 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3187 if ( stream_.userBuffer[mode] == NULL ) {
3188 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3189 goto error;
3190 }
3191
3192 if ( stream_.doConvertBuffer[mode] ) {
3193
3194 bool makeBuffer = true;
3195 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3196 if ( isDuplexInput && stream_.deviceBuffer ) {
3197 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3198 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3199 }
3200
3201 if ( makeBuffer ) {
3202 bufferBytes *= *bufferSize;
3203 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3204 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3205 if ( stream_.deviceBuffer == NULL ) {
3206 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3207 goto error;
3208 }
3209 }
3210 }
3211
3212 // Determine device latencies
3213 long inputLatency, outputLatency;
3214 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3215 if ( result != ASE_OK ) {
3216 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3217 errorText_ = errorStream_.str();
3218 error( RtAudioError::WARNING); // warn but don't fail
3219 }
3220 else {
3221 stream_.latency[0] = outputLatency;
3222 stream_.latency[1] = inputLatency;
3223 }
3224
3225 // Setup the buffer conversion information structure. We don't use
3226 // buffers to do channel offsets, so we override that parameter
3227 // here.
3228 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3229
3230 return SUCCESS;
3231
3232 error:
3233 if ( !isDuplexInput ) {
3234 // the cleanup for error in the duplex input, is done by RtApi::openStream
3235 // So we clean up for single channel only
3236
3237 if ( buffersAllocated )
3238 ASIODisposeBuffers();
3239
3240 drivers.removeCurrentDriver();
3241
3242 if ( handle ) {
3243 CloseHandle( handle->condition );
3244 if ( handle->bufferInfos )
3245 free( handle->bufferInfos );
3246
3247 delete handle;
3248 stream_.apiHandle = 0;
3249 }
3250
3251
3252 if ( stream_.userBuffer[mode] ) {
3253 free( stream_.userBuffer[mode] );
3254 stream_.userBuffer[mode] = 0;
3255 }
3256
3257 if ( stream_.deviceBuffer ) {
3258 free( stream_.deviceBuffer );
3259 stream_.deviceBuffer = 0;
3260 }
3261 }
3262
3263 return FAILURE;
3264 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3265
closeStream()3266 void RtApiAsio :: closeStream()
3267 {
3268 if ( stream_.state == STREAM_CLOSED ) {
3269 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3270 error( RtAudioError::WARNING );
3271 return;
3272 }
3273
3274 if ( stream_.state == STREAM_RUNNING ) {
3275 stream_.state = STREAM_STOPPED;
3276 ASIOStop();
3277 }
3278 ASIODisposeBuffers();
3279 drivers.removeCurrentDriver();
3280
3281 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3282 if ( handle ) {
3283 CloseHandle( handle->condition );
3284 if ( handle->bufferInfos )
3285 free( handle->bufferInfos );
3286 delete handle;
3287 stream_.apiHandle = 0;
3288 }
3289
3290 for ( int i=0; i<2; i++ ) {
3291 if ( stream_.userBuffer[i] ) {
3292 free( stream_.userBuffer[i] );
3293 stream_.userBuffer[i] = 0;
3294 }
3295 }
3296
3297 if ( stream_.deviceBuffer ) {
3298 free( stream_.deviceBuffer );
3299 stream_.deviceBuffer = 0;
3300 }
3301
3302 stream_.mode = UNINITIALIZED;
3303 stream_.state = STREAM_CLOSED;
3304 }
3305
3306 bool stopThreadCalled = false;
3307
startStream()3308 void RtApiAsio :: startStream()
3309 {
3310 verifyStream();
3311 if ( stream_.state == STREAM_RUNNING ) {
3312 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3313 error( RtAudioError::WARNING );
3314 return;
3315 }
3316
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3318 ASIOError result = ASIOStart();
3319 if ( result != ASE_OK ) {
3320 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3321 errorText_ = errorStream_.str();
3322 goto unlock;
3323 }
3324
3325 handle->drainCounter = 0;
3326 handle->internalDrain = false;
3327 ResetEvent( handle->condition );
3328 stream_.state = STREAM_RUNNING;
3329 asioXRun = false;
3330
3331 unlock:
3332 stopThreadCalled = false;
3333
3334 if ( result == ASE_OK ) return;
3335 error( RtAudioError::SYSTEM_ERROR );
3336 }
3337
stopStream()3338 void RtApiAsio :: stopStream()
3339 {
3340 verifyStream();
3341 if ( stream_.state == STREAM_STOPPED ) {
3342 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3343 error( RtAudioError::WARNING );
3344 return;
3345 }
3346
3347 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3348 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3349 if ( handle->drainCounter == 0 ) {
3350 handle->drainCounter = 2;
3351 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3352 }
3353 }
3354
3355 stream_.state = STREAM_STOPPED;
3356
3357 ASIOError result = ASIOStop();
3358 if ( result != ASE_OK ) {
3359 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3360 errorText_ = errorStream_.str();
3361 }
3362
3363 if ( result == ASE_OK ) return;
3364 error( RtAudioError::SYSTEM_ERROR );
3365 }
3366
abortStream()3367 void RtApiAsio :: abortStream()
3368 {
3369 verifyStream();
3370 if ( stream_.state == STREAM_STOPPED ) {
3371 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3372 error( RtAudioError::WARNING );
3373 return;
3374 }
3375
3376 // The following lines were commented-out because some behavior was
3377 // noted where the device buffers need to be zeroed to avoid
3378 // continuing sound, even when the device buffers are completely
3379 // disposed. So now, calling abort is the same as calling stop.
3380 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3381 // handle->drainCounter = 2;
3382 stopStream();
3383 }
3384
3385 // This function will be called by a spawned thread when the user
3386 // callback function signals that the stream should be stopped or
3387 // aborted. It is necessary to handle it this way because the
3388 // callbackEvent() function must return before the ASIOStop()
3389 // function will return.
asioStopStream(void * ptr)3390 static unsigned __stdcall asioStopStream( void *ptr )
3391 {
3392 CallbackInfo *info = (CallbackInfo *) ptr;
3393 RtApiAsio *object = (RtApiAsio *) info->object;
3394
3395 object->stopStream();
3396 _endthreadex( 0 );
3397 return 0;
3398 }
3399
// ASIO buffer-switch handler: run once per driver buffer swap.  Invokes the
// user callback, then copies/converts audio between the user buffers and the
// driver's per-channel ASIO buffers identified by bufferIndex.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // A late buffer switch can arrive after a stop request; just ignore it.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter is incremented once per callback (below), so > 3 means the
  // zero-filled output has had time to flush through the device.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );  // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun noted by the driver (via kAsioResyncRequest), then clear it.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately: stop on a separate thread.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    // Callback return 1 = drain remaining output, then stop internally.
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data to the device format, then distribute it into the
      // per-channel (non-interleaved) ASIO output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No format conversion needed: copy (optionally byte-swapped) user
      // data channel-by-channel into the ASIO output buffers.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion needed: interleave directly into the user input buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3553
sampleRateChanged(ASIOSampleRate sRate)3554 static void sampleRateChanged( ASIOSampleRate sRate )
3555 {
3556 // The ASIO documentation says that this usually only happens during
3557 // external sync. Audio processing is not stopped by the driver,
3558 // actual sample rate might not have even changed, maybe only the
3559 // sample rate status of an AES/EBU or S/PDIF digital input at the
3560 // audio device.
3561
3562 RtApi *object = (RtApi *) asioCallbackInfo->object;
3563 try {
3564 object->stopStream();
3565 }
3566 catch ( RtAudioError &exception ) {
3567 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3568 return;
3569 }
3570
3571 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3572 }
3573
// ASIO driver message callback.  Returns 1 for supported/handled selectors,
// 0 otherwise (except kAsioEngineVersion, which returns the version).
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
{
  long ret = 0;

  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
      ret = 1L;
    break;
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Resetting the driver
    // means completely destructing it: ASIOStop(), ASIODisposeBuffers(),
    // destruction -- afterwards you initialize the driver again.
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
    ret = 1L;
    break;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // situations, too.
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
    asioXRun = true;  // reported as an under/overflow on the next callback
    ret = 1L;
    break;
  case kAsioLatenciesChanged:
    // This informs the host application that the driver's latencies
    // changed.  Beware, this does not mean that the buffer sizes have
    // changed!  You might need to update internal delay data.
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
    ret = 1L;
    break;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
    ret = 2L;
    break;
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
    ret = 0;
    break;
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
    ret = 0;
    break;
  }
  return ret;
}
3643
getAsioErrorString(ASIOError result)3644 static const char* getAsioErrorString( ASIOError result )
3645 {
3646 struct Messages
3647 {
3648 ASIOError value;
3649 const char*message;
3650 };
3651
3652 static const Messages m[] =
3653 {
3654 { ASE_NotPresent, "Hardware input or output is not present or available." },
3655 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3656 { ASE_InvalidParameter, "Invalid input parameter." },
3657 { ASE_InvalidMode, "Invalid mode." },
3658 { ASE_SPNotAdvancing, "Sample position not advancing." },
3659 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3660 { ASE_NoMemory, "Not enough memory to complete the request." }
3661 };
3662
3663 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3664 if ( m[i].value == result ) return m[i].message;
3665
3666 return "Unknown error.";
3667 }
3668
3669 //******************** End of __WINDOWS_ASIO__ *********************//
3670 #endif
3671
3672
3673 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3674
3675 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3676 // - Introduces support for the Windows WASAPI API
3677 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3678 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3679 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3680
3681 #ifndef INITGUID
3682 #define INITGUID
3683 #endif
3684 #include <audioclient.h>
3685 #include <avrt.h>
3686 #include <mmdeviceapi.h>
3687 #include <functiondiscoverykeys_devpkey.h>
3688
3689 //=============================================================================
3690
// Release a COM interface pointer (if non-NULL) and null it out.
// NOTE(review): not wrapped in do { } while ( 0 ), so use it only as a
// stand-alone statement -- an unbraced if/else around it would bind
// surprisingly.  All call sites in this file use it that way.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}

// Function-pointer type matching AvSetMmThreadCharacteristicsW (avrt.dll);
// presumably used to resolve the symbol at runtime so WASAPI builds do not
// hard-link against avrt -- TODO(review): confirm against the loader code.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3699
3700 //-----------------------------------------------------------------------------
3701
3702 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3703 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3704 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3705 // provide intermediate storage for read / write synchronization.
3706 class WasapiBuffer
3707 {
3708 public:
WasapiBuffer()3709 WasapiBuffer()
3710 : buffer_( NULL ),
3711 bufferSize_( 0 ),
3712 inIndex_( 0 ),
3713 outIndex_( 0 ) {}
3714
~WasapiBuffer()3715 ~WasapiBuffer() {
3716 free( buffer_ );
3717 }
3718
3719 // sets the length of the internal ring buffer
setBufferSize(unsigned int bufferSize,unsigned int formatBytes)3720 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3721 free( buffer_ );
3722
3723 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3724
3725 bufferSize_ = bufferSize;
3726 inIndex_ = 0;
3727 outIndex_ = 0;
3728 }
3729
3730 // attempt to push a buffer into the ring buffer at the current "in" index
pushBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3731 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3732 {
3733 if ( !buffer || // incoming buffer is NULL
3734 bufferSize == 0 || // incoming buffer has no data
3735 bufferSize > bufferSize_ ) // incoming buffer too large
3736 {
3737 return false;
3738 }
3739
3740 unsigned int relOutIndex = outIndex_;
3741 unsigned int inIndexEnd = inIndex_ + bufferSize;
3742 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3743 relOutIndex += bufferSize_;
3744 }
3745
3746 // "in" index can end on the "out" index but cannot begin at it
3747 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3748 return false; // not enough space between "in" index and "out" index
3749 }
3750
3751 // copy buffer from external to internal
3752 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3753 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3754 int fromInSize = bufferSize - fromZeroSize;
3755
3756 switch( format )
3757 {
3758 case RTAUDIO_SINT8:
3759 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3760 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3761 break;
3762 case RTAUDIO_SINT16:
3763 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3764 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3765 break;
3766 case RTAUDIO_SINT24:
3767 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3768 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3769 break;
3770 case RTAUDIO_SINT32:
3771 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3772 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3773 break;
3774 case RTAUDIO_FLOAT32:
3775 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3776 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3777 break;
3778 case RTAUDIO_FLOAT64:
3779 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3780 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3781 break;
3782 }
3783
3784 // update "in" index
3785 inIndex_ += bufferSize;
3786 inIndex_ %= bufferSize_;
3787
3788 return true;
3789 }
3790
3791 // attempt to pull a buffer from the ring buffer from the current "out" index
pullBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3792 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3793 {
3794 if ( !buffer || // incoming buffer is NULL
3795 bufferSize == 0 || // incoming buffer has no data
3796 bufferSize > bufferSize_ ) // incoming buffer too large
3797 {
3798 return false;
3799 }
3800
3801 unsigned int relInIndex = inIndex_;
3802 unsigned int outIndexEnd = outIndex_ + bufferSize;
3803 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3804 relInIndex += bufferSize_;
3805 }
3806
3807 // "out" index can begin at and end on the "in" index
3808 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3809 return false; // not enough space between "out" index and "in" index
3810 }
3811
3812 // copy buffer from internal to external
3813 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3814 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3815 int fromOutSize = bufferSize - fromZeroSize;
3816
3817 switch( format )
3818 {
3819 case RTAUDIO_SINT8:
3820 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3821 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3822 break;
3823 case RTAUDIO_SINT16:
3824 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3825 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3826 break;
3827 case RTAUDIO_SINT24:
3828 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3829 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3830 break;
3831 case RTAUDIO_SINT32:
3832 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3833 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3834 break;
3835 case RTAUDIO_FLOAT32:
3836 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3837 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3838 break;
3839 case RTAUDIO_FLOAT64:
3840 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3841 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3842 break;
3843 }
3844
3845 // update "out" index
3846 outIndex_ += bufferSize;
3847 outIndex_ %= bufferSize_;
3848
3849 return true;
3850 }
3851
3852 private:
3853 char* buffer_;
3854 unsigned int bufferSize_;
3855 unsigned int inIndex_;
3856 unsigned int outIndex_;
3857 };
3858
3859 //-----------------------------------------------------------------------------
3860
3861 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3862 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3863 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3864 // This sample rate converter favors speed over quality, and works best with conversions between
3865 // one rate and its multiple.
convertBufferWasapi(char * outBuffer,const char * inBuffer,const unsigned int & channelCount,const unsigned int & inSampleRate,const unsigned int & outSampleRate,const unsigned int & inSampleCount,unsigned int & outSampleCount,const RtAudioFormat & format)3866 void convertBufferWasapi( char* outBuffer,
3867 const char* inBuffer,
3868 const unsigned int& channelCount,
3869 const unsigned int& inSampleRate,
3870 const unsigned int& outSampleRate,
3871 const unsigned int& inSampleCount,
3872 unsigned int& outSampleCount,
3873 const RtAudioFormat& format )
3874 {
3875 // calculate the new outSampleCount and relative sampleStep
3876 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3877 float sampleStep = 1.0f / sampleRatio;
3878 float inSampleFraction = 0.0f;
3879
3880 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
3881
3882 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3883 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3884 {
3885 unsigned int inSample = ( unsigned int ) inSampleFraction;
3886
3887 switch ( format )
3888 {
3889 case RTAUDIO_SINT8:
3890 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3891 break;
3892 case RTAUDIO_SINT16:
3893 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3894 break;
3895 case RTAUDIO_SINT24:
3896 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3897 break;
3898 case RTAUDIO_SINT32:
3899 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3900 break;
3901 case RTAUDIO_FLOAT32:
3902 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3903 break;
3904 case RTAUDIO_FLOAT64:
3905 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3906 break;
3907 }
3908
3909 // jump to next in sample
3910 inSampleFraction += sampleStep;
3911 }
3912 }
3913
3914 //-----------------------------------------------------------------------------
3915
3916 // A structure to hold various information related to the WASAPI implementation.
// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  IAudioClient* captureAudioClient;   // audio client for the capture endpoint
  IAudioClient* renderAudioClient;    // audio client for the render endpoint
  IAudioCaptureClient* captureClient; // capture service on captureAudioClient
  IAudioRenderClient* renderClient;   // render service on renderAudioClient
  HANDLE captureEvent;                // event signaled when capture data is ready
  HANDLE renderEvent;                 // event signaled when render buffer is free

  // All members start NULL; populated lazily as the stream is opened.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
3934
3935 //=============================================================================
3936
RtApiWasapi()3937 RtApiWasapi::RtApiWasapi()
3938 : coInitialized_( false ), deviceEnumerator_( NULL )
3939 {
3940 // WASAPI can run either apartment or multi-threaded
3941 HRESULT hr = CoInitialize( NULL );
3942 if ( !FAILED( hr ) )
3943 coInitialized_ = true;
3944
3945 // Instantiate device enumerator
3946 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3947 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3948 ( void** ) &deviceEnumerator_ );
3949
3950 if ( FAILED( hr ) ) {
3951 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3952 error( RtAudioError::DRIVER_ERROR );
3953 }
3954 }
3955
3956 //-----------------------------------------------------------------------------
3957
// Close any open stream, release the device enumerator, and balance the
// constructor's CoInitialize().
RtApiWasapi::~RtApiWasapi()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  // balance it with a CoUninitialize().
  if ( coInitialized_ )
    CoUninitialize();
}
3969
3970 //=============================================================================
3971
// Return the total number of active WASAPI endpoints: capture devices plus
// render devices (WASAPI enumerates the two directions separately).
// Returns 0 and raises DRIVER_ERROR if any enumeration step fails.
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // Count capture devices
  // (errorText_ doubles as the success flag for the Exit path below.)
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4018
4019 //-----------------------------------------------------------------------------
4020
getDeviceInfo(unsigned int device)4021 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4022 {
4023 RtAudio::DeviceInfo info;
4024 unsigned int captureDeviceCount = 0;
4025 unsigned int renderDeviceCount = 0;
4026 std::string defaultDeviceName;
4027 bool isCaptureDevice = false;
4028
4029 PROPVARIANT deviceNameProp;
4030 PROPVARIANT defaultDeviceNameProp;
4031
4032 IMMDeviceCollection* captureDevices = NULL;
4033 IMMDeviceCollection* renderDevices = NULL;
4034 IMMDevice* devicePtr = NULL;
4035 IMMDevice* defaultDevicePtr = NULL;
4036 IAudioClient* audioClient = NULL;
4037 IPropertyStore* devicePropStore = NULL;
4038 IPropertyStore* defaultDevicePropStore = NULL;
4039
4040 WAVEFORMATEX* deviceFormat = NULL;
4041 WAVEFORMATEX* closestMatchFormat = NULL;
4042
4043 // probed
4044 info.probed = false;
4045
4046 // Count capture devices
4047 errorText_.clear();
4048 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4049 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4050 if ( FAILED( hr ) ) {
4051 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4052 goto Exit;
4053 }
4054
4055 hr = captureDevices->GetCount( &captureDeviceCount );
4056 if ( FAILED( hr ) ) {
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4058 goto Exit;
4059 }
4060
4061 // Count render devices
4062 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4063 if ( FAILED( hr ) ) {
4064 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4065 goto Exit;
4066 }
4067
4068 hr = renderDevices->GetCount( &renderDeviceCount );
4069 if ( FAILED( hr ) ) {
4070 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4071 goto Exit;
4072 }
4073
4074 // validate device index
4075 if ( device >= captureDeviceCount + renderDeviceCount ) {
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4077 errorType = RtAudioError::INVALID_USE;
4078 goto Exit;
4079 }
4080
4081 // determine whether index falls within capture or render devices
4082 if ( device >= renderDeviceCount ) {
4083 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4084 if ( FAILED( hr ) ) {
4085 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4086 goto Exit;
4087 }
4088 isCaptureDevice = true;
4089 }
4090 else {
4091 hr = renderDevices->Item( device, &devicePtr );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4094 goto Exit;
4095 }
4096 isCaptureDevice = false;
4097 }
4098
4099 // get default device name
4100 if ( isCaptureDevice ) {
4101 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4102 if ( FAILED( hr ) ) {
4103 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4104 goto Exit;
4105 }
4106 }
4107 else {
4108 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4109 if ( FAILED( hr ) ) {
4110 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4111 goto Exit;
4112 }
4113 }
4114
4115 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4116 if ( FAILED( hr ) ) {
4117 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4118 goto Exit;
4119 }
4120 PropVariantInit( &defaultDeviceNameProp );
4121
4122 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4123 if ( FAILED( hr ) ) {
4124 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4125 goto Exit;
4126 }
4127
4128 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4129
4130 // name
4131 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4132 if ( FAILED( hr ) ) {
4133 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4134 goto Exit;
4135 }
4136
4137 PropVariantInit( &deviceNameProp );
4138
4139 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4140 if ( FAILED( hr ) ) {
4141 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4142 goto Exit;
4143 }
4144
4145 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4146
4147 // is default
4148 if ( isCaptureDevice ) {
4149 info.isDefaultInput = info.name == defaultDeviceName;
4150 info.isDefaultOutput = false;
4151 }
4152 else {
4153 info.isDefaultInput = false;
4154 info.isDefaultOutput = info.name == defaultDeviceName;
4155 }
4156
4157 // channel count
4158 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4159 if ( FAILED( hr ) ) {
4160 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4161 goto Exit;
4162 }
4163
4164 hr = audioClient->GetMixFormat( &deviceFormat );
4165 if ( FAILED( hr ) ) {
4166 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4167 goto Exit;
4168 }
4169
4170 if ( isCaptureDevice ) {
4171 info.inputChannels = deviceFormat->nChannels;
4172 info.outputChannels = 0;
4173 info.duplexChannels = 0;
4174 }
4175 else {
4176 info.inputChannels = 0;
4177 info.outputChannels = deviceFormat->nChannels;
4178 info.duplexChannels = 0;
4179 }
4180
4181 // sample rates
4182 info.sampleRates.clear();
4183
4184 // allow support for all sample rates as we have a built-in sample rate converter
4185 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4186 info.sampleRates.push_back( SAMPLE_RATES[i] );
4187 }
4188 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4189
4190 // native format
4191 info.nativeFormats = 0;
4192
4193 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4194 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4195 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4196 {
4197 if ( deviceFormat->wBitsPerSample == 32 ) {
4198 info.nativeFormats |= RTAUDIO_FLOAT32;
4199 }
4200 else if ( deviceFormat->wBitsPerSample == 64 ) {
4201 info.nativeFormats |= RTAUDIO_FLOAT64;
4202 }
4203 }
4204 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4205 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4206 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4207 {
4208 if ( deviceFormat->wBitsPerSample == 8 ) {
4209 info.nativeFormats |= RTAUDIO_SINT8;
4210 }
4211 else if ( deviceFormat->wBitsPerSample == 16 ) {
4212 info.nativeFormats |= RTAUDIO_SINT16;
4213 }
4214 else if ( deviceFormat->wBitsPerSample == 24 ) {
4215 info.nativeFormats |= RTAUDIO_SINT24;
4216 }
4217 else if ( deviceFormat->wBitsPerSample == 32 ) {
4218 info.nativeFormats |= RTAUDIO_SINT32;
4219 }
4220 }
4221
4222 // probed
4223 info.probed = true;
4224
4225 Exit:
4226 // release all references
4227 PropVariantClear( &deviceNameProp );
4228 PropVariantClear( &defaultDeviceNameProp );
4229
4230 SAFE_RELEASE( captureDevices );
4231 SAFE_RELEASE( renderDevices );
4232 SAFE_RELEASE( devicePtr );
4233 SAFE_RELEASE( defaultDevicePtr );
4234 SAFE_RELEASE( audioClient );
4235 SAFE_RELEASE( devicePropStore );
4236 SAFE_RELEASE( defaultDevicePropStore );
4237
4238 CoTaskMemFree( deviceFormat );
4239 CoTaskMemFree( closestMatchFormat );
4240
4241 if ( !errorText_.empty() )
4242 error( errorType );
4243 return info;
4244 }
4245
4246 //-----------------------------------------------------------------------------
4247
getDefaultOutputDevice(void)4248 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4249 {
4250 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4251 if ( getDeviceInfo( i ).isDefaultOutput ) {
4252 return i;
4253 }
4254 }
4255
4256 return 0;
4257 }
4258
4259 //-----------------------------------------------------------------------------
4260
getDefaultInputDevice(void)4261 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4262 {
4263 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4264 if ( getDeviceInfo( i ).isDefaultInput ) {
4265 return i;
4266 }
4267 }
4268
4269 return 0;
4270 }
4271
4272 //-----------------------------------------------------------------------------
4273
closeStream(void)4274 void RtApiWasapi::closeStream( void )
4275 {
4276 if ( stream_.state == STREAM_CLOSED ) {
4277 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4278 error( RtAudioError::WARNING );
4279 return;
4280 }
4281
4282 if ( stream_.state != STREAM_STOPPED )
4283 stopStream();
4284
4285 // clean up stream memory
4286 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4287 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4288
4289 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4290 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4291
4292 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4293 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4294
4295 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4296 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4297
4298 delete ( WasapiHandle* ) stream_.apiHandle;
4299 stream_.apiHandle = NULL;
4300
4301 for ( int i = 0; i < 2; i++ ) {
4302 if ( stream_.userBuffer[i] ) {
4303 free( stream_.userBuffer[i] );
4304 stream_.userBuffer[i] = 0;
4305 }
4306 }
4307
4308 if ( stream_.deviceBuffer ) {
4309 free( stream_.deviceBuffer );
4310 stream_.deviceBuffer = 0;
4311 }
4312
4313 // update stream state
4314 stream_.state = STREAM_CLOSED;
4315 }
4316
4317 //-----------------------------------------------------------------------------
4318
startStream(void)4319 void RtApiWasapi::startStream( void )
4320 {
4321 verifyStream();
4322
4323 if ( stream_.state == STREAM_RUNNING ) {
4324 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4325 error( RtAudioError::WARNING );
4326 return;
4327 }
4328
4329 // update stream state
4330 stream_.state = STREAM_RUNNING;
4331
4332 // create WASAPI stream thread
4333 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4334
4335 if ( !stream_.callbackInfo.thread ) {
4336 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4337 error( RtAudioError::THREAD_ERROR );
4338 }
4339 else {
4340 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4341 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4342 }
4343 }
4344
4345 //-----------------------------------------------------------------------------
4346
stopStream(void)4347 void RtApiWasapi::stopStream( void )
4348 {
4349 verifyStream();
4350
4351 if ( stream_.state == STREAM_STOPPED ) {
4352 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4353 error( RtAudioError::WARNING );
4354 return;
4355 }
4356
4357 // inform stream thread by setting stream state to STREAM_STOPPING
4358 stream_.state = STREAM_STOPPING;
4359
4360 // wait until stream thread is stopped
4361 while( stream_.state != STREAM_STOPPED ) {
4362 Sleep( 1 );
4363 }
4364
4365 // Wait for the last buffer to play before stopping.
4366 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4367
4368 // stop capture client if applicable
4369 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4370 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4371 if ( FAILED( hr ) ) {
4372 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4373 error( RtAudioError::DRIVER_ERROR );
4374 return;
4375 }
4376 }
4377
4378 // stop render client if applicable
4379 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4380 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4381 if ( FAILED( hr ) ) {
4382 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4383 error( RtAudioError::DRIVER_ERROR );
4384 return;
4385 }
4386 }
4387
4388 // close thread handle
4389 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4390 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4391 error( RtAudioError::THREAD_ERROR );
4392 return;
4393 }
4394
4395 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4396 }
4397
4398 //-----------------------------------------------------------------------------
4399
abortStream(void)4400 void RtApiWasapi::abortStream( void )
4401 {
4402 verifyStream();
4403
4404 if ( stream_.state == STREAM_STOPPED ) {
4405 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4406 error( RtAudioError::WARNING );
4407 return;
4408 }
4409
4410 // inform stream thread by setting stream state to STREAM_STOPPING
4411 stream_.state = STREAM_STOPPING;
4412
4413 // wait until stream thread is stopped
4414 while ( stream_.state != STREAM_STOPPED ) {
4415 Sleep( 1 );
4416 }
4417
4418 // stop capture client if applicable
4419 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4420 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4421 if ( FAILED( hr ) ) {
4422 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4423 error( RtAudioError::DRIVER_ERROR );
4424 return;
4425 }
4426 }
4427
4428 // stop render client if applicable
4429 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4430 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4431 if ( FAILED( hr ) ) {
4432 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4433 error( RtAudioError::DRIVER_ERROR );
4434 return;
4435 }
4436 }
4437
4438 // close thread handle
4439 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4440 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4441 error( RtAudioError::THREAD_ERROR );
4442 return;
4443 }
4444
4445 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4446 }
4447
4448 //-----------------------------------------------------------------------------
4449
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4450 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4451 unsigned int firstChannel, unsigned int sampleRate,
4452 RtAudioFormat format, unsigned int* bufferSize,
4453 RtAudio::StreamOptions* options )
4454 {
4455 bool methodResult = FAILURE;
4456 unsigned int captureDeviceCount = 0;
4457 unsigned int renderDeviceCount = 0;
4458
4459 IMMDeviceCollection* captureDevices = NULL;
4460 IMMDeviceCollection* renderDevices = NULL;
4461 IMMDevice* devicePtr = NULL;
4462 WAVEFORMATEX* deviceFormat = NULL;
4463 unsigned int bufferBytes;
4464 stream_.state = STREAM_STOPPED;
4465
4466 // create API Handle if not already created
4467 if ( !stream_.apiHandle )
4468 stream_.apiHandle = ( void* ) new WasapiHandle();
4469
4470 // Count capture devices
4471 errorText_.clear();
4472 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4473 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4474 if ( FAILED( hr ) ) {
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4476 goto Exit;
4477 }
4478
4479 hr = captureDevices->GetCount( &captureDeviceCount );
4480 if ( FAILED( hr ) ) {
4481 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4482 goto Exit;
4483 }
4484
4485 // Count render devices
4486 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4487 if ( FAILED( hr ) ) {
4488 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4489 goto Exit;
4490 }
4491
4492 hr = renderDevices->GetCount( &renderDeviceCount );
4493 if ( FAILED( hr ) ) {
4494 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4495 goto Exit;
4496 }
4497
4498 // validate device index
4499 if ( device >= captureDeviceCount + renderDeviceCount ) {
4500 errorType = RtAudioError::INVALID_USE;
4501 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4502 goto Exit;
4503 }
4504
4505 // determine whether index falls within capture or render devices
4506 if ( device >= renderDeviceCount ) {
4507 if ( mode != INPUT ) {
4508 errorType = RtAudioError::INVALID_USE;
4509 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4510 goto Exit;
4511 }
4512
4513 // retrieve captureAudioClient from devicePtr
4514 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4515
4516 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4517 if ( FAILED( hr ) ) {
4518 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4519 goto Exit;
4520 }
4521
4522 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4523 NULL, ( void** ) &captureAudioClient );
4524 if ( FAILED( hr ) ) {
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4526 goto Exit;
4527 }
4528
4529 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4530 if ( FAILED( hr ) ) {
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4532 goto Exit;
4533 }
4534
4535 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4536 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4537 }
4538 else {
4539 if ( mode != OUTPUT ) {
4540 errorType = RtAudioError::INVALID_USE;
4541 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4542 goto Exit;
4543 }
4544
4545 // retrieve renderAudioClient from devicePtr
4546 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4547
4548 hr = renderDevices->Item( device, &devicePtr );
4549 if ( FAILED( hr ) ) {
4550 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4551 goto Exit;
4552 }
4553
4554 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4555 NULL, ( void** ) &renderAudioClient );
4556 if ( FAILED( hr ) ) {
4557 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4558 goto Exit;
4559 }
4560
4561 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4562 if ( FAILED( hr ) ) {
4563 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4564 goto Exit;
4565 }
4566
4567 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4568 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4569 }
4570
4571 // fill stream data
4572 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4573 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4574 stream_.mode = DUPLEX;
4575 }
4576 else {
4577 stream_.mode = mode;
4578 }
4579
4580 stream_.device[mode] = device;
4581 stream_.doByteSwap[mode] = false;
4582 stream_.sampleRate = sampleRate;
4583 stream_.bufferSize = *bufferSize;
4584 stream_.nBuffers = 1;
4585 stream_.nUserChannels[mode] = channels;
4586 stream_.channelOffset[mode] = firstChannel;
4587 stream_.userFormat = format;
4588 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4589
4590 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4591 stream_.userInterleaved = false;
4592 else
4593 stream_.userInterleaved = true;
4594 stream_.deviceInterleaved[mode] = true;
4595
4596 // Set flags for buffer conversion.
4597 stream_.doConvertBuffer[mode] = false;
4598 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4599 stream_.nUserChannels != stream_.nDeviceChannels )
4600 stream_.doConvertBuffer[mode] = true;
4601 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4602 stream_.nUserChannels[mode] > 1 )
4603 stream_.doConvertBuffer[mode] = true;
4604
4605 if ( stream_.doConvertBuffer[mode] )
4606 setConvertInfo( mode, 0 );
4607
4608 // Allocate necessary internal buffers
4609 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4610
4611 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4612 if ( !stream_.userBuffer[mode] ) {
4613 errorType = RtAudioError::MEMORY_ERROR;
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4615 goto Exit;
4616 }
4617
4618 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4619 stream_.callbackInfo.priority = 15;
4620 else
4621 stream_.callbackInfo.priority = 0;
4622
4623 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4624 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4625
4626 methodResult = SUCCESS;
4627
4628 Exit:
4629 //clean up
4630 SAFE_RELEASE( captureDevices );
4631 SAFE_RELEASE( renderDevices );
4632 SAFE_RELEASE( devicePtr );
4633 CoTaskMemFree( deviceFormat );
4634
4635 // if method failed, close the stream
4636 if ( methodResult == FAILURE )
4637 closeStream();
4638
4639 if ( !errorText_.empty() )
4640 error( errorType );
4641 return methodResult;
4642 }
4643
4644 //=============================================================================
4645
runWasapiThread(void * wasapiPtr)4646 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4647 {
4648 if ( wasapiPtr )
4649 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4650
4651 return 0;
4652 }
4653
stopWasapiThread(void * wasapiPtr)4654 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4655 {
4656 if ( wasapiPtr )
4657 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4658
4659 return 0;
4660 }
4661
abortWasapiThread(void * wasapiPtr)4662 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4663 {
4664 if ( wasapiPtr )
4665 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4666
4667 return 0;
4668 }
4669
4670 //-----------------------------------------------------------------------------
4671
wasapiThread()4672 void RtApiWasapi::wasapiThread()
4673 {
4674 // as this is a new thread, we must CoInitialize it
4675 CoInitialize( NULL );
4676
4677 HRESULT hr;
4678
4679 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4680 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4681 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4682 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4683 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4684 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4685
4686 WAVEFORMATEX* captureFormat = NULL;
4687 WAVEFORMATEX* renderFormat = NULL;
4688 float captureSrRatio = 0.0f;
4689 float renderSrRatio = 0.0f;
4690 WasapiBuffer captureBuffer;
4691 WasapiBuffer renderBuffer;
4692
4693 // declare local stream variables
4694 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4695 BYTE* streamBuffer = NULL;
4696 unsigned long captureFlags = 0;
4697 unsigned int bufferFrameCount = 0;
4698 unsigned int numFramesPadding = 0;
4699 unsigned int convBufferSize = 0;
4700 bool callbackPushed = false;
4701 bool callbackPulled = false;
4702 bool callbackStopped = false;
4703 int callbackResult = 0;
4704
4705 // convBuffer is used to store converted buffers between WASAPI and the user
4706 char* convBuffer = NULL;
4707 unsigned int convBuffSize = 0;
4708 unsigned int deviceBuffSize = 0;
4709
4710 errorText_.clear();
4711 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4712
4713 // Attempt to assign "Pro Audio" characteristic to thread
4714 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4715 if ( AvrtDll ) {
4716 DWORD taskIndex = 0;
4717 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4718 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4719 FreeLibrary( AvrtDll );
4720 }
4721
4722 // start capture stream if applicable
4723 if ( captureAudioClient ) {
4724 hr = captureAudioClient->GetMixFormat( &captureFormat );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4727 goto Exit;
4728 }
4729
4730 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4731
4732 // initialize capture stream according to desire buffer size
4733 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4734 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4735
4736 if ( !captureClient ) {
4737 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4738 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4739 desiredBufferPeriod,
4740 desiredBufferPeriod,
4741 captureFormat,
4742 NULL );
4743 if ( FAILED( hr ) ) {
4744 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4745 goto Exit;
4746 }
4747
4748 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4749 ( void** ) &captureClient );
4750 if ( FAILED( hr ) ) {
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4752 goto Exit;
4753 }
4754
4755 // configure captureEvent to trigger on every available capture buffer
4756 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4757 if ( !captureEvent ) {
4758 errorType = RtAudioError::SYSTEM_ERROR;
4759 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4760 goto Exit;
4761 }
4762
4763 hr = captureAudioClient->SetEventHandle( captureEvent );
4764 if ( FAILED( hr ) ) {
4765 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4766 goto Exit;
4767 }
4768
4769 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4770 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4771 }
4772
4773 unsigned int inBufferSize = 0;
4774 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4775 if ( FAILED( hr ) ) {
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4777 goto Exit;
4778 }
4779
4780 // scale outBufferSize according to stream->user sample rate ratio
4781 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4782 inBufferSize *= stream_.nDeviceChannels[INPUT];
4783
4784 // set captureBuffer size
4785 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4786
4787 // reset the capture stream
4788 hr = captureAudioClient->Reset();
4789 if ( FAILED( hr ) ) {
4790 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4791 goto Exit;
4792 }
4793
4794 // start the capture stream
4795 hr = captureAudioClient->Start();
4796 if ( FAILED( hr ) ) {
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4798 goto Exit;
4799 }
4800 }
4801
4802 // start render stream if applicable
4803 if ( renderAudioClient ) {
4804 hr = renderAudioClient->GetMixFormat( &renderFormat );
4805 if ( FAILED( hr ) ) {
4806 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4807 goto Exit;
4808 }
4809
4810 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4811
4812 // initialize render stream according to desire buffer size
4813 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4814 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4815
4816 if ( !renderClient ) {
4817 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4818 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4819 desiredBufferPeriod,
4820 desiredBufferPeriod,
4821 renderFormat,
4822 NULL );
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4825 goto Exit;
4826 }
4827
4828 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4829 ( void** ) &renderClient );
4830 if ( FAILED( hr ) ) {
4831 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4832 goto Exit;
4833 }
4834
4835 // configure renderEvent to trigger on every available render buffer
4836 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4837 if ( !renderEvent ) {
4838 errorType = RtAudioError::SYSTEM_ERROR;
4839 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4840 goto Exit;
4841 }
4842
4843 hr = renderAudioClient->SetEventHandle( renderEvent );
4844 if ( FAILED( hr ) ) {
4845 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4846 goto Exit;
4847 }
4848
4849 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4850 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4851 }
4852
4853 unsigned int outBufferSize = 0;
4854 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4855 if ( FAILED( hr ) ) {
4856 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4857 goto Exit;
4858 }
4859
4860 // scale inBufferSize according to user->stream sample rate ratio
4861 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4862 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4863
4864 // set renderBuffer size
4865 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4866
4867 // reset the render stream
4868 hr = renderAudioClient->Reset();
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4871 goto Exit;
4872 }
4873
4874 // start the render stream
4875 hr = renderAudioClient->Start();
4876 if ( FAILED( hr ) ) {
4877 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4878 goto Exit;
4879 }
4880 }
4881
4882 if ( stream_.mode == INPUT ) {
4883 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4884 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4885 }
4886 else if ( stream_.mode == OUTPUT ) {
4887 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4888 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4889 }
4890 else if ( stream_.mode == DUPLEX ) {
4891 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4892 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4893 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4894 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4895 }
4896
4897 convBuffer = ( char* ) malloc( convBuffSize );
4898 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4899 if ( !convBuffer || !stream_.deviceBuffer ) {
4900 errorType = RtAudioError::MEMORY_ERROR;
4901 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4902 goto Exit;
4903 }
4904
4905 // stream process loop
4906 while ( stream_.state != STREAM_STOPPING ) {
4907 if ( !callbackPulled ) {
4908 // Callback Input
4909 // ==============
4910 // 1. Pull callback buffer from inputBuffer
4911 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4912 // Convert callback buffer to user format
4913
4914 if ( captureAudioClient ) {
4915 // Pull callback buffer from inputBuffer
4916 callbackPulled = captureBuffer.pullBuffer( convBuffer,
4917 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
4918 stream_.deviceFormat[INPUT] );
4919
4920 if ( callbackPulled ) {
4921 // Convert callback buffer to user sample rate
4922 convertBufferWasapi( stream_.deviceBuffer,
4923 convBuffer,
4924 stream_.nDeviceChannels[INPUT],
4925 captureFormat->nSamplesPerSec,
4926 stream_.sampleRate,
4927 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
4928 convBufferSize,
4929 stream_.deviceFormat[INPUT] );
4930
4931 if ( stream_.doConvertBuffer[INPUT] ) {
4932 // Convert callback buffer to user format
4933 convertBuffer( stream_.userBuffer[INPUT],
4934 stream_.deviceBuffer,
4935 stream_.convertInfo[INPUT] );
4936 }
4937 else {
4938 // no further conversion, simple copy deviceBuffer to userBuffer
4939 memcpy( stream_.userBuffer[INPUT],
4940 stream_.deviceBuffer,
4941 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4942 }
4943 }
4944 }
4945 else {
4946 // if there is no capture stream, set callbackPulled flag
4947 callbackPulled = true;
4948 }
4949
4950 // Execute Callback
4951 // ================
4952 // 1. Execute user callback method
4953 // 2. Handle return value from callback
4954
4955 // if callback has not requested the stream to stop
4956 if ( callbackPulled && !callbackStopped ) {
4957 // Execute user callback method
4958 callbackResult = callback( stream_.userBuffer[OUTPUT],
4959 stream_.userBuffer[INPUT],
4960 stream_.bufferSize,
4961 getStreamTime(),
4962 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4963 stream_.callbackInfo.userData );
4964
4965 // Handle return value from callback
4966 if ( callbackResult == 1 ) {
4967 // instantiate a thread to stop this thread
4968 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4969 if ( !threadHandle ) {
4970 errorType = RtAudioError::THREAD_ERROR;
4971 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4972 goto Exit;
4973 }
4974 else if ( !CloseHandle( threadHandle ) ) {
4975 errorType = RtAudioError::THREAD_ERROR;
4976 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4977 goto Exit;
4978 }
4979
4980 callbackStopped = true;
4981 }
4982 else if ( callbackResult == 2 ) {
4983 // instantiate a thread to stop this thread
4984 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4985 if ( !threadHandle ) {
4986 errorType = RtAudioError::THREAD_ERROR;
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4988 goto Exit;
4989 }
4990 else if ( !CloseHandle( threadHandle ) ) {
4991 errorType = RtAudioError::THREAD_ERROR;
4992 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4993 goto Exit;
4994 }
4995
4996 callbackStopped = true;
4997 }
4998 }
4999 }
5000
5001 // Callback Output
5002 // ===============
5003 // 1. Convert callback buffer to stream format
5004 // 2. Convert callback buffer to stream sample rate and channel count
5005 // 3. Push callback buffer into outputBuffer
5006
5007 if ( renderAudioClient && callbackPulled ) {
5008 if ( stream_.doConvertBuffer[OUTPUT] ) {
5009 // Convert callback buffer to stream format
5010 convertBuffer( stream_.deviceBuffer,
5011 stream_.userBuffer[OUTPUT],
5012 stream_.convertInfo[OUTPUT] );
5013
5014 }
5015
5016 // Convert callback buffer to stream sample rate
5017 convertBufferWasapi( convBuffer,
5018 stream_.deviceBuffer,
5019 stream_.nDeviceChannels[OUTPUT],
5020 stream_.sampleRate,
5021 renderFormat->nSamplesPerSec,
5022 stream_.bufferSize,
5023 convBufferSize,
5024 stream_.deviceFormat[OUTPUT] );
5025
5026 // Push callback buffer into outputBuffer
5027 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5028 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5029 stream_.deviceFormat[OUTPUT] );
5030 }
5031 else {
5032 // if there is no render stream, set callbackPushed flag
5033 callbackPushed = true;
5034 }
5035
5036 // Stream Capture
5037 // ==============
5038 // 1. Get capture buffer from stream
5039 // 2. Push capture buffer into inputBuffer
5040 // 3. If 2. was successful: Release capture buffer
5041
5042 if ( captureAudioClient ) {
5043 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5044 if ( !callbackPulled ) {
5045 WaitForSingleObject( captureEvent, INFINITE );
5046 }
5047
5048 // Get capture buffer from stream
5049 hr = captureClient->GetBuffer( &streamBuffer,
5050 &bufferFrameCount,
5051 &captureFlags, NULL, NULL );
5052 if ( FAILED( hr ) ) {
5053 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5054 goto Exit;
5055 }
5056
5057 if ( bufferFrameCount != 0 ) {
5058 // Push capture buffer into inputBuffer
5059 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5060 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5061 stream_.deviceFormat[INPUT] ) )
5062 {
5063 // Release capture buffer
5064 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5065 if ( FAILED( hr ) ) {
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5067 goto Exit;
5068 }
5069 }
5070 else
5071 {
5072 // Inform WASAPI that capture was unsuccessful
5073 hr = captureClient->ReleaseBuffer( 0 );
5074 if ( FAILED( hr ) ) {
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5076 goto Exit;
5077 }
5078 }
5079 }
5080 else
5081 {
5082 // Inform WASAPI that capture was unsuccessful
5083 hr = captureClient->ReleaseBuffer( 0 );
5084 if ( FAILED( hr ) ) {
5085 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5086 goto Exit;
5087 }
5088 }
5089 }
5090
5091 // Stream Render
5092 // =============
5093 // 1. Get render buffer from stream
5094 // 2. Pull next buffer from outputBuffer
5095 // 3. If 2. was successful: Fill render buffer with next buffer
5096 // Release render buffer
5097
5098 if ( renderAudioClient ) {
5099 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5100 if ( callbackPulled && !callbackPushed ) {
5101 WaitForSingleObject( renderEvent, INFINITE );
5102 }
5103
5104 // Get render buffer from stream
5105 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5106 if ( FAILED( hr ) ) {
5107 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5108 goto Exit;
5109 }
5110
5111 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5112 if ( FAILED( hr ) ) {
5113 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5114 goto Exit;
5115 }
5116
5117 bufferFrameCount -= numFramesPadding;
5118
5119 if ( bufferFrameCount != 0 ) {
5120 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5121 if ( FAILED( hr ) ) {
5122 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5123 goto Exit;
5124 }
5125
5126 // Pull next buffer from outputBuffer
5127 // Fill render buffer with next buffer
5128 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5129 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5130 stream_.deviceFormat[OUTPUT] ) )
5131 {
5132 // Release render buffer
5133 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5134 if ( FAILED( hr ) ) {
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5136 goto Exit;
5137 }
5138 }
5139 else
5140 {
5141 // Inform WASAPI that render was unsuccessful
5142 hr = renderClient->ReleaseBuffer( 0, 0 );
5143 if ( FAILED( hr ) ) {
5144 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5145 goto Exit;
5146 }
5147 }
5148 }
5149 else
5150 {
5151 // Inform WASAPI that render was unsuccessful
5152 hr = renderClient->ReleaseBuffer( 0, 0 );
5153 if ( FAILED( hr ) ) {
5154 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5155 goto Exit;
5156 }
5157 }
5158 }
5159
5160 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5161 if ( callbackPushed ) {
5162 callbackPulled = false;
5163 // tick stream time
5164 RtApi::tickStreamTime();
5165 }
5166
5167 }
5168
5169 Exit:
5170 // clean up
5171 CoTaskMemFree( captureFormat );
5172 CoTaskMemFree( renderFormat );
5173
5174 free ( convBuffer );
5175
5176 CoUninitialize();
5177
5178 // update stream state
5179 stream_.state = STREAM_STOPPED;
5180
5181 if ( errorText_.empty() )
5182 return;
5183 else
5184 error( errorType );
5185 }
5186
5187 //******************** End of __WINDOWS_WASAPI__ *********************//
5188 #endif
5189
5190
5191 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5192
5193 // Modified by Robin Davies, October 2005
5194 // - Improvements to DirectX pointer chasing.
5195 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5196 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5197 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5198 // Changed device query structure for RtAudio 4.0.7, January 2010
5199
5200 #include <dsound.h>
5201 #include <assert.h>
5202 #include <algorithm>
5203
5204 #if defined(__MINGW32__)
5205 // missing from latest mingw winapi
5206 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5207 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5208 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5209 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5210 #endif
5211
5212 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5213
5214 #ifdef _MSC_VER // if Microsoft Visual C++
5215 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5216 #endif
5217
// Report whether `pointer` lies inside the circular-buffer window
// [earlierPointer, laterPointer), taking buffer wrap-around into account.
// Returns a boolean result widened to DWORD (0 or 1).
static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
{
  // Unwrap each position so the [earlier, later) window becomes linear,
  // then do a plain containment test.
  DWORD p = pointer;
  DWORD later = laterPointer;
  if ( p > bufferSize ) p -= bufferSize;
  if ( later < earlierPointer ) later += bufferSize;
  if ( p < earlierPointer ) p += bufferSize;
  return ( p >= earlierPointer ) && ( p < later );
}
5225
5226 // A structure to hold various information related to the DirectSound
5227 // API implementation.
// A structure to hold various information related to the DirectSound
// API implementation. Index 0 of each pair refers to playback, index 1
// to capture (matching the OUTPUT/INPUT convention used elsewhere).
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  void *id[2];               // DirectSound object pointers (playback, capture).
  void *buffer[2];           // DirectSound buffer pointers (playback, capture).
  bool xrun[2];              // Over/underrun flags (playback, capture).
  UINT bufferPointer[2];     // Current byte offsets within the DS buffers.
  DWORD dsBufferSize[2];     // Device buffer sizes in bytes.
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition;          // Event handle; presumably created during stream open — TODO confirm.

  // Zero-initialize every member so no field is ever read uninitialized.
  // (The original constructor left condition, dsBufferSize and
  // dsPointerLeadTime indeterminate.)
  DsHandle()
    : drainCounter(0), internalDrain(false), condition(0)
  {
    for ( int i=0; i<2; i++ ) {
      id[i] = 0;
      buffer[i] = 0;
      xrun[i] = false;
      bufferPointer[i] = 0;
      dsBufferSize[i] = 0;
      dsPointerLeadTime[i] = 0;
    }
  }
};
5242
5243 // Declarations for utility functions, callbacks, and structures
5244 // specific to the DirectSound implementation.
5245 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5246 LPCTSTR description,
5247 LPCTSTR module,
5248 LPVOID lpContext );
5249
5250 static const char* getErrorString( int code );
5251
5252 static unsigned __stdcall callbackHandler( void *ptr );
5253
// Per-device record built up during DirectSound enumeration.
// Index 0 of id/validId refers to the output (playback) GUID, index 1 to
// the input (capture) GUID.
struct DsDevice {
  LPGUID id[2];     // Device GUIDs; meaningful only when the matching validId is set.
  bool validId[2];  // Whether id[0] (output) / id[1] (input) holds a real GUID.
  bool found;       // Set during enumeration; used to prune devices that disappeared.
  std::string name; // Human-readable device name.

  // Initialize all members — including the GUID pointers, which the
  // original constructor left indeterminate — so no field is read
  // uninitialized.
  DsDevice()
  : found(false) { validId[0] = false; validId[1] = false; id[0] = 0; id[1] = 0; }
};
5263
// Context object handed to deviceQueryCallback() while enumerating
// devices (see getDeviceCount()).
struct DsProbeData {
  bool isInput;                         // true when enumerating capture devices, false for playback
  std::vector<struct DsDevice>* dsDevices; // device list to fill/update during enumeration
};
5268
RtApiDs()5269 RtApiDs :: RtApiDs()
5270 {
5271 // Dsound will run both-threaded. If CoInitialize fails, then just
5272 // accept whatever the mainline chose for a threading model.
5273 coInitialized_ = false;
5274 HRESULT hr = CoInitialize( NULL );
5275 if ( !FAILED( hr ) ) coInitialized_ = true;
5276 }
5277
// Release COM (balancing the constructor's CoInitialize, if it
// succeeded) and make sure any open stream is shut down.
RtApiDs :: ~RtApiDs()
{
  if ( coInitialized_ ) CoUninitialize(); // balanced call.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
5283
5284 // The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0; // DirectSound enumerates the default output device first
}
5289
5290 // The DirectSound default input is always the first input device,
5291 // which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0; // the first capture device enumerated is the default input
}
5296
getDeviceCount(void)5297 unsigned int RtApiDs :: getDeviceCount( void )
5298 {
5299 // Set query flag for previously found devices to false, so that we
5300 // can check for any devices that have disappeared.
5301 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5302 dsDevices[i].found = false;
5303
5304 // Query DirectSound devices.
5305 struct DsProbeData probeInfo;
5306 probeInfo.isInput = false;
5307 probeInfo.dsDevices = &dsDevices;
5308 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5309 if ( FAILED( result ) ) {
5310 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5311 errorText_ = errorStream_.str();
5312 error( RtAudioError::WARNING );
5313 }
5314
5315 // Query DirectSoundCapture devices.
5316 probeInfo.isInput = true;
5317 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5318 if ( FAILED( result ) ) {
5319 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5320 errorText_ = errorStream_.str();
5321 error( RtAudioError::WARNING );
5322 }
5323
5324 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5325 for ( unsigned int i=0; i<dsDevices.size(); ) {
5326 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5327 else i++;
5328 }
5329
5330 return static_cast<unsigned int>(dsDevices.size());
5331 }
5332
// Probe the capabilities of a single DirectSound device: channel counts,
// supported sample rates and native formats. The playback (output) side
// is probed first; control then falls through (or jumps, on failure) to
// the capture probe at the probeInput label.
// NOTE(review): if the output probe succeeds but the capture probe then
// reports zero input channels, the early return below leaves info.probed
// == false and info.name unset despite valid output data — confirm this
// is intended.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No playback GUID for this device: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  // Declarations sit above the goto target so no initialization is
  // jumped over.
  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // No capture side on this device: finish with the output-only result.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // inCaps.dwFormats is a bitmask of WAVE_FORMAT_* capability flags; the
  // stereo (xSxx) flags apply when the device reports >= 2 channels, the
  // mono (xMxx) flags when it reports exactly 1.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Collect the rates implied by the format flags (16-bit preferred).
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // The first capture device enumerated is the DirectSound default input.
  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5511
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5512 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5513 unsigned int firstChannel, unsigned int sampleRate,
5514 RtAudioFormat format, unsigned int *bufferSize,
5515 RtAudio::StreamOptions *options )
5516 {
5517 if ( channels + firstChannel > 2 ) {
5518 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5519 return FAILURE;
5520 }
5521
5522 size_t nDevices = dsDevices.size();
5523 if ( nDevices == 0 ) {
5524 // This should not happen because a check is made before this function is called.
5525 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5526 return FAILURE;
5527 }
5528
5529 if ( device >= nDevices ) {
5530 // This should not happen because a check is made before this function is called.
5531 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5532 return FAILURE;
5533 }
5534
5535 if ( mode == OUTPUT ) {
5536 if ( dsDevices[ device ].validId[0] == false ) {
5537 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5538 errorText_ = errorStream_.str();
5539 return FAILURE;
5540 }
5541 }
5542 else { // mode == INPUT
5543 if ( dsDevices[ device ].validId[1] == false ) {
5544 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5545 errorText_ = errorStream_.str();
5546 return FAILURE;
5547 }
5548 }
5549
5550 // According to a note in PortAudio, using GetDesktopWindow()
5551 // instead of GetForegroundWindow() is supposed to avoid problems
5552 // that occur when the application's window is not the foreground
5553 // window. Also, if the application window closes before the
5554 // DirectSound buffer, DirectSound can crash. In the past, I had
5555 // problems when using GetDesktopWindow() but it seems fine now
5556 // (January 2010). I'll leave it commented here.
5557 // HWND hWnd = GetForegroundWindow();
5558 HWND hWnd = GetDesktopWindow();
5559
5560 // Check the numberOfBuffers parameter and limit the lowest value to
5561 // two. This is a judgement call and a value of two is probably too
5562 // low for capture, but it should work for playback.
5563 int nBuffers = 0;
5564 if ( options ) nBuffers = options->numberOfBuffers;
5565 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5566 if ( nBuffers < 2 ) nBuffers = 3;
5567
5568 // Check the lower range of the user-specified buffer size and set
5569 // (arbitrarily) to a lower bound of 32.
5570 if ( *bufferSize < 32 ) *bufferSize = 32;
5571
5572 // Create the wave format structure. The data format setting will
5573 // be determined later.
5574 WAVEFORMATEX waveFormat;
5575 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5576 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5577 waveFormat.nChannels = channels + firstChannel;
5578 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5579
5580 // Determine the device buffer size. By default, we'll use the value
5581 // defined above (32K), but we will grow it to make allowances for
5582 // very large software buffer sizes.
5583 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5584 DWORD dsPointerLeadTime = 0;
5585
5586 void *ohandle = 0, *bhandle = 0;
5587 HRESULT result;
5588 if ( mode == OUTPUT ) {
5589
5590 LPDIRECTSOUND output;
5591 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5592 if ( FAILED( result ) ) {
5593 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5594 errorText_ = errorStream_.str();
5595 return FAILURE;
5596 }
5597
5598 DSCAPS outCaps;
5599 outCaps.dwSize = sizeof( outCaps );
5600 result = output->GetCaps( &outCaps );
5601 if ( FAILED( result ) ) {
5602 output->Release();
5603 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5604 errorText_ = errorStream_.str();
5605 return FAILURE;
5606 }
5607
5608 // Check channel information.
5609 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5610 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5611 errorText_ = errorStream_.str();
5612 return FAILURE;
5613 }
5614
5615 // Check format information. Use 16-bit format unless not
5616 // supported or user requests 8-bit.
5617 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5618 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5619 waveFormat.wBitsPerSample = 16;
5620 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5621 }
5622 else {
5623 waveFormat.wBitsPerSample = 8;
5624 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5625 }
5626 stream_.userFormat = format;
5627
5628 // Update wave format structure and buffer information.
5629 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5630 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5631 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5632
5633 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5634 while ( dsPointerLeadTime * 2U > dsBufferSize )
5635 dsBufferSize *= 2;
5636
5637 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5638 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5639 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5640 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5641 if ( FAILED( result ) ) {
5642 output->Release();
5643 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5644 errorText_ = errorStream_.str();
5645 return FAILURE;
5646 }
5647
5648 // Even though we will write to the secondary buffer, we need to
5649 // access the primary buffer to set the correct output format
5650 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5651 // buffer description.
5652 DSBUFFERDESC bufferDescription;
5653 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5654 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5655 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5656
5657 // Obtain the primary buffer
5658 LPDIRECTSOUNDBUFFER buffer;
5659 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5660 if ( FAILED( result ) ) {
5661 output->Release();
5662 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5663 errorText_ = errorStream_.str();
5664 return FAILURE;
5665 }
5666
5667 // Set the primary DS buffer sound format.
5668 result = buffer->SetFormat( &waveFormat );
5669 if ( FAILED( result ) ) {
5670 output->Release();
5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5672 errorText_ = errorStream_.str();
5673 return FAILURE;
5674 }
5675
5676 // Setup the secondary DS buffer description.
5677 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5678 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5679 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5680 DSBCAPS_GLOBALFOCUS |
5681 DSBCAPS_GETCURRENTPOSITION2 |
5682 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5683 bufferDescription.dwBufferBytes = dsBufferSize;
5684 bufferDescription.lpwfxFormat = &waveFormat;
5685
5686 // Try to create the secondary DS buffer. If that doesn't work,
5687 // try to use software mixing. Otherwise, there's a problem.
5688 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5689 if ( FAILED( result ) ) {
5690 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5691 DSBCAPS_GLOBALFOCUS |
5692 DSBCAPS_GETCURRENTPOSITION2 |
5693 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5694 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5695 if ( FAILED( result ) ) {
5696 output->Release();
5697 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5698 errorText_ = errorStream_.str();
5699 return FAILURE;
5700 }
5701 }
5702
5703 // Get the buffer size ... might be different from what we specified.
5704 DSBCAPS dsbcaps;
5705 dsbcaps.dwSize = sizeof( DSBCAPS );
5706 result = buffer->GetCaps( &dsbcaps );
5707 if ( FAILED( result ) ) {
5708 output->Release();
5709 buffer->Release();
5710 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5711 errorText_ = errorStream_.str();
5712 return FAILURE;
5713 }
5714
5715 dsBufferSize = dsbcaps.dwBufferBytes;
5716
5717 // Lock the DS buffer
5718 LPVOID audioPtr;
5719 DWORD dataLen;
5720 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5721 if ( FAILED( result ) ) {
5722 output->Release();
5723 buffer->Release();
5724 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5725 errorText_ = errorStream_.str();
5726 return FAILURE;
5727 }
5728
5729 // Zero the DS buffer
5730 ZeroMemory( audioPtr, dataLen );
5731
5732 // Unlock the DS buffer
5733 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5734 if ( FAILED( result ) ) {
5735 output->Release();
5736 buffer->Release();
5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5738 errorText_ = errorStream_.str();
5739 return FAILURE;
5740 }
5741
5742 ohandle = (void *) output;
5743 bhandle = (void *) buffer;
5744 }
5745
5746 if ( mode == INPUT ) {
5747
5748 LPDIRECTSOUNDCAPTURE input;
5749 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5750 if ( FAILED( result ) ) {
5751 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5752 errorText_ = errorStream_.str();
5753 return FAILURE;
5754 }
5755
5756 DSCCAPS inCaps;
5757 inCaps.dwSize = sizeof( inCaps );
5758 result = input->GetCaps( &inCaps );
5759 if ( FAILED( result ) ) {
5760 input->Release();
5761 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5762 errorText_ = errorStream_.str();
5763 return FAILURE;
5764 }
5765
5766 // Check channel information.
5767 if ( inCaps.dwChannels < channels + firstChannel ) {
5768 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5769 return FAILURE;
5770 }
5771
5772 // Check format information. Use 16-bit format unless user
5773 // requests 8-bit.
5774 DWORD deviceFormats;
5775 if ( channels + firstChannel == 2 ) {
5776 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5777 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5778 waveFormat.wBitsPerSample = 8;
5779 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5780 }
5781 else { // assume 16-bit is supported
5782 waveFormat.wBitsPerSample = 16;
5783 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5784 }
5785 }
5786 else { // channel == 1
5787 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5788 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5789 waveFormat.wBitsPerSample = 8;
5790 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5791 }
5792 else { // assume 16-bit is supported
5793 waveFormat.wBitsPerSample = 16;
5794 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5795 }
5796 }
5797 stream_.userFormat = format;
5798
5799 // Update wave format structure and buffer information.
5800 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5801 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5802 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5803
5804 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5805 while ( dsPointerLeadTime * 2U > dsBufferSize )
5806 dsBufferSize *= 2;
5807
5808 // Setup the secondary DS buffer description.
5809 DSCBUFFERDESC bufferDescription;
5810 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5811 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5812 bufferDescription.dwFlags = 0;
5813 bufferDescription.dwReserved = 0;
5814 bufferDescription.dwBufferBytes = dsBufferSize;
5815 bufferDescription.lpwfxFormat = &waveFormat;
5816
5817 // Create the capture buffer.
5818 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5819 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5820 if ( FAILED( result ) ) {
5821 input->Release();
5822 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5823 errorText_ = errorStream_.str();
5824 return FAILURE;
5825 }
5826
5827 // Get the buffer size ... might be different from what we specified.
5828 DSCBCAPS dscbcaps;
5829 dscbcaps.dwSize = sizeof( DSCBCAPS );
5830 result = buffer->GetCaps( &dscbcaps );
5831 if ( FAILED( result ) ) {
5832 input->Release();
5833 buffer->Release();
5834 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5835 errorText_ = errorStream_.str();
5836 return FAILURE;
5837 }
5838
5839 dsBufferSize = dscbcaps.dwBufferBytes;
5840
5841 // NOTE: We could have a problem here if this is a duplex stream
5842 // and the play and capture hardware buffer sizes are different
5843 // (I'm actually not sure if that is a problem or not).
5844 // Currently, we are not verifying that.
5845
5846 // Lock the capture buffer
5847 LPVOID audioPtr;
5848 DWORD dataLen;
5849 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5850 if ( FAILED( result ) ) {
5851 input->Release();
5852 buffer->Release();
5853 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5854 errorText_ = errorStream_.str();
5855 return FAILURE;
5856 }
5857
5858 // Zero the buffer
5859 ZeroMemory( audioPtr, dataLen );
5860
5861 // Unlock the buffer
5862 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5863 if ( FAILED( result ) ) {
5864 input->Release();
5865 buffer->Release();
5866 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5867 errorText_ = errorStream_.str();
5868 return FAILURE;
5869 }
5870
5871 ohandle = (void *) input;
5872 bhandle = (void *) buffer;
5873 }
5874
5875 // Set various stream parameters
5876 DsHandle *handle = 0;
5877 stream_.nDeviceChannels[mode] = channels + firstChannel;
5878 stream_.nUserChannels[mode] = channels;
5879 stream_.bufferSize = *bufferSize;
5880 stream_.channelOffset[mode] = firstChannel;
5881 stream_.deviceInterleaved[mode] = true;
5882 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5883 else stream_.userInterleaved = true;
5884
5885 // Set flag for buffer conversion
5886 stream_.doConvertBuffer[mode] = false;
5887 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5888 stream_.doConvertBuffer[mode] = true;
5889 if (stream_.userFormat != stream_.deviceFormat[mode])
5890 stream_.doConvertBuffer[mode] = true;
5891 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5892 stream_.nUserChannels[mode] > 1 )
5893 stream_.doConvertBuffer[mode] = true;
5894
5895 // Allocate necessary internal buffers
5896 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5897 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5898 if ( stream_.userBuffer[mode] == NULL ) {
5899 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5900 goto error;
5901 }
5902
5903 if ( stream_.doConvertBuffer[mode] ) {
5904
5905 bool makeBuffer = true;
5906 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5907 if ( mode == INPUT ) {
5908 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5909 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5910 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5911 }
5912 }
5913
5914 if ( makeBuffer ) {
5915 bufferBytes *= *bufferSize;
5916 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5917 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5918 if ( stream_.deviceBuffer == NULL ) {
5919 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5920 goto error;
5921 }
5922 }
5923 }
5924
5925 // Allocate our DsHandle structures for the stream.
5926 if ( stream_.apiHandle == 0 ) {
5927 try {
5928 handle = new DsHandle;
5929 }
5930 catch ( std::bad_alloc& ) {
5931 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5932 goto error;
5933 }
5934
5935 // Create a manual-reset event.
5936 handle->condition = CreateEvent( NULL, // no security
5937 TRUE, // manual-reset
5938 FALSE, // non-signaled initially
5939 NULL ); // unnamed
5940 stream_.apiHandle = (void *) handle;
5941 }
5942 else
5943 handle = (DsHandle *) stream_.apiHandle;
5944 handle->id[mode] = ohandle;
5945 handle->buffer[mode] = bhandle;
5946 handle->dsBufferSize[mode] = dsBufferSize;
5947 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5948
5949 stream_.device[mode] = device;
5950 stream_.state = STREAM_STOPPED;
5951 if ( stream_.mode == OUTPUT && mode == INPUT )
5952 // We had already set up an output stream.
5953 stream_.mode = DUPLEX;
5954 else
5955 stream_.mode = mode;
5956 stream_.nBuffers = nBuffers;
5957 stream_.sampleRate = sampleRate;
5958
5959 // Setup the buffer conversion information structure.
5960 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5961
5962 // Setup the callback thread.
5963 if ( stream_.callbackInfo.isRunning == false ) {
5964 unsigned threadId;
5965 stream_.callbackInfo.isRunning = true;
5966 stream_.callbackInfo.object = (void *) this;
5967 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5968 &stream_.callbackInfo, 0, &threadId );
5969 if ( stream_.callbackInfo.thread == 0 ) {
5970 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5971 goto error;
5972 }
5973
5974 // Boost DS thread priority
5975 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5976 }
5977 return SUCCESS;
5978
5979 error:
5980 if ( handle ) {
5981 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5982 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5983 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5984 if ( buffer ) buffer->Release();
5985 object->Release();
5986 }
5987 if ( handle->buffer[1] ) {
5988 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5989 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5990 if ( buffer ) buffer->Release();
5991 object->Release();
5992 }
5993 CloseHandle( handle->condition );
5994 delete handle;
5995 stream_.apiHandle = 0;
5996 }
5997
5998 for ( int i=0; i<2; i++ ) {
5999 if ( stream_.userBuffer[i] ) {
6000 free( stream_.userBuffer[i] );
6001 stream_.userBuffer[i] = 0;
6002 }
6003 }
6004
6005 if ( stream_.deviceBuffer ) {
6006 free( stream_.deviceBuffer );
6007 stream_.deviceBuffer = 0;
6008 }
6009
6010 stream_.state = STREAM_CLOSED;
6011 return FAILURE;
6012 }
6013
closeStream()6014 void RtApiDs :: closeStream()
6015 {
6016 if ( stream_.state == STREAM_CLOSED ) {
6017 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6018 error( RtAudioError::WARNING );
6019 return;
6020 }
6021
6022 // Stop the callback thread.
6023 stream_.callbackInfo.isRunning = false;
6024 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6025 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6026
6027 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6028 if ( handle ) {
6029 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6030 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6031 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6032 if ( buffer ) {
6033 buffer->Stop();
6034 buffer->Release();
6035 }
6036 object->Release();
6037 }
6038 if ( handle->buffer[1] ) {
6039 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6040 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6041 if ( buffer ) {
6042 buffer->Stop();
6043 buffer->Release();
6044 }
6045 object->Release();
6046 }
6047 CloseHandle( handle->condition );
6048 delete handle;
6049 stream_.apiHandle = 0;
6050 }
6051
6052 for ( int i=0; i<2; i++ ) {
6053 if ( stream_.userBuffer[i] ) {
6054 free( stream_.userBuffer[i] );
6055 stream_.userBuffer[i] = 0;
6056 }
6057 }
6058
6059 if ( stream_.deviceBuffer ) {
6060 free( stream_.deviceBuffer );
6061 stream_.deviceBuffer = 0;
6062 }
6063
6064 stream_.mode = UNINITIALIZED;
6065 stream_.state = STREAM_CLOSED;
6066 }
6067
startStream()6068 void RtApiDs :: startStream()
6069 {
6070 verifyStream();
6071 if ( stream_.state == STREAM_RUNNING ) {
6072 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6073 error( RtAudioError::WARNING );
6074 return;
6075 }
6076
6077 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6078
6079 // Increase scheduler frequency on lesser windows (a side-effect of
6080 // increasing timer accuracy). On greater windows (Win2K or later),
6081 // this is already in effect.
6082 timeBeginPeriod( 1 );
6083
6084 buffersRolling = false;
6085 duplexPrerollBytes = 0;
6086
6087 if ( stream_.mode == DUPLEX ) {
6088 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6089 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6090 }
6091
6092 HRESULT result = 0;
6093 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6094
6095 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6096 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6097 if ( FAILED( result ) ) {
6098 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6099 errorText_ = errorStream_.str();
6100 goto unlock;
6101 }
6102 }
6103
6104 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6105
6106 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6107 result = buffer->Start( DSCBSTART_LOOPING );
6108 if ( FAILED( result ) ) {
6109 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6110 errorText_ = errorStream_.str();
6111 goto unlock;
6112 }
6113 }
6114
6115 handle->drainCounter = 0;
6116 handle->internalDrain = false;
6117 ResetEvent( handle->condition );
6118 stream_.state = STREAM_RUNNING;
6119
6120 unlock:
6121 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6122 }
6123
// Stop the stream, letting queued output play out first.  Both the
// playback and capture DS buffers are stopped, zeroed, and their
// software pointers reset so a later startStream() does not replay
// stale data.  NOTE(review): the mutex locking is asymmetric — in
// DUPLEX mode the lock taken in the output branch is held through the
// input branch and released at the common `unlock:` label.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      // Ask the callback loop to drain remaining output, then wait for
      // it to signal completion.
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex is already held from the output branch above.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6227
abortStream()6228 void RtApiDs :: abortStream()
6229 {
6230 verifyStream();
6231 if ( stream_.state == STREAM_STOPPED ) {
6232 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6233 error( RtAudioError::WARNING );
6234 return;
6235 }
6236
6237 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6238 handle->drainCounter = 2;
6239
6240 stopStream();
6241 }
6242
callbackEvent()6243 void RtApiDs :: callbackEvent()
6244 {
6245 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6246 Sleep( 50 ); // sleep 50 milliseconds
6247 return;
6248 }
6249
6250 if ( stream_.state == STREAM_CLOSED ) {
6251 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6252 error( RtAudioError::WARNING );
6253 return;
6254 }
6255
6256 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6257 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6258
6259 // Check if we were draining the stream and signal is finished.
6260 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6261
6262 stream_.state = STREAM_STOPPING;
6263 if ( handle->internalDrain == false )
6264 SetEvent( handle->condition );
6265 else
6266 stopStream();
6267 return;
6268 }
6269
6270 // Invoke user callback to get fresh output data UNLESS we are
6271 // draining stream.
6272 if ( handle->drainCounter == 0 ) {
6273 RtAudioCallback callback = (RtAudioCallback) info->callback;
6274 double streamTime = getStreamTime();
6275 RtAudioStreamStatus status = 0;
6276 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6277 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6278 handle->xrun[0] = false;
6279 }
6280 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6281 status |= RTAUDIO_INPUT_OVERFLOW;
6282 handle->xrun[1] = false;
6283 }
6284 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6285 stream_.bufferSize, streamTime, status, info->userData );
6286 if ( cbReturnValue == 2 ) {
6287 stream_.state = STREAM_STOPPING;
6288 handle->drainCounter = 2;
6289 abortStream();
6290 return;
6291 }
6292 else if ( cbReturnValue == 1 ) {
6293 handle->drainCounter = 1;
6294 handle->internalDrain = true;
6295 }
6296 }
6297
6298 HRESULT result;
6299 DWORD currentWritePointer, safeWritePointer;
6300 DWORD currentReadPointer, safeReadPointer;
6301 UINT nextWritePointer;
6302
6303 LPVOID buffer1 = NULL;
6304 LPVOID buffer2 = NULL;
6305 DWORD bufferSize1 = 0;
6306 DWORD bufferSize2 = 0;
6307
6308 char *buffer;
6309 long bufferBytes;
6310
6311 MUTEX_LOCK( &stream_.mutex );
6312 if ( stream_.state == STREAM_STOPPED ) {
6313 MUTEX_UNLOCK( &stream_.mutex );
6314 return;
6315 }
6316
6317 if ( buffersRolling == false ) {
6318 if ( stream_.mode == DUPLEX ) {
6319 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6320
6321 // It takes a while for the devices to get rolling. As a result,
6322 // there's no guarantee that the capture and write device pointers
6323 // will move in lockstep. Wait here for both devices to start
6324 // rolling, and then set our buffer pointers accordingly.
6325 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6326 // bytes later than the write buffer.
6327
6328 // Stub: a serious risk of having a pre-emptive scheduling round
6329 // take place between the two GetCurrentPosition calls... but I'm
6330 // really not sure how to solve the problem. Temporarily boost to
6331 // Realtime priority, maybe; but I'm not sure what priority the
6332 // DirectSound service threads run at. We *should* be roughly
6333 // within a ms or so of correct.
6334
6335 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6336 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6337
6338 DWORD startSafeWritePointer, startSafeReadPointer;
6339
6340 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6341 if ( FAILED( result ) ) {
6342 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6343 errorText_ = errorStream_.str();
6344 MUTEX_UNLOCK( &stream_.mutex );
6345 error( RtAudioError::SYSTEM_ERROR );
6346 return;
6347 }
6348 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6349 if ( FAILED( result ) ) {
6350 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6351 errorText_ = errorStream_.str();
6352 MUTEX_UNLOCK( &stream_.mutex );
6353 error( RtAudioError::SYSTEM_ERROR );
6354 return;
6355 }
6356 while ( true ) {
6357 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6358 if ( FAILED( result ) ) {
6359 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6360 errorText_ = errorStream_.str();
6361 MUTEX_UNLOCK( &stream_.mutex );
6362 error( RtAudioError::SYSTEM_ERROR );
6363 return;
6364 }
6365 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6366 if ( FAILED( result ) ) {
6367 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6368 errorText_ = errorStream_.str();
6369 MUTEX_UNLOCK( &stream_.mutex );
6370 error( RtAudioError::SYSTEM_ERROR );
6371 return;
6372 }
6373 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6374 Sleep( 1 );
6375 }
6376
6377 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6378
6379 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6380 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6381 handle->bufferPointer[1] = safeReadPointer;
6382 }
6383 else if ( stream_.mode == OUTPUT ) {
6384
6385 // Set the proper nextWritePosition after initial startup.
6386 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6387 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6388 if ( FAILED( result ) ) {
6389 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6390 errorText_ = errorStream_.str();
6391 MUTEX_UNLOCK( &stream_.mutex );
6392 error( RtAudioError::SYSTEM_ERROR );
6393 return;
6394 }
6395 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6396 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6397 }
6398
6399 buffersRolling = true;
6400 }
6401
6402 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6403
6404 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6405
6406 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6407 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6408 bufferBytes *= formatBytes( stream_.userFormat );
6409 memset( stream_.userBuffer[0], 0, bufferBytes );
6410 }
6411
6412 // Setup parameters and do buffer conversion if necessary.
6413 if ( stream_.doConvertBuffer[0] ) {
6414 buffer = stream_.deviceBuffer;
6415 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6416 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6417 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6418 }
6419 else {
6420 buffer = stream_.userBuffer[0];
6421 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6422 bufferBytes *= formatBytes( stream_.userFormat );
6423 }
6424
6425 // No byte swapping necessary in DirectSound implementation.
6426
6427 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6428 // unsigned. So, we need to convert our signed 8-bit data here to
6429 // unsigned.
6430 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6431 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6432
6433 DWORD dsBufferSize = handle->dsBufferSize[0];
6434 nextWritePointer = handle->bufferPointer[0];
6435
6436 DWORD endWrite, leadPointer;
6437 while ( true ) {
6438 // Find out where the read and "safe write" pointers are.
6439 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6440 if ( FAILED( result ) ) {
6441 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6442 errorText_ = errorStream_.str();
6443 MUTEX_UNLOCK( &stream_.mutex );
6444 error( RtAudioError::SYSTEM_ERROR );
6445 return;
6446 }
6447
6448 // We will copy our output buffer into the region between
6449 // safeWritePointer and leadPointer. If leadPointer is not
6450 // beyond the next endWrite position, wait until it is.
6451 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6452 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6453 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6454 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6455 endWrite = nextWritePointer + bufferBytes;
6456
6457 // Check whether the entire write region is behind the play pointer.
6458 if ( leadPointer >= endWrite ) break;
6459
6460 // If we are here, then we must wait until the leadPointer advances
6461 // beyond the end of our next write region. We use the
6462 // Sleep() function to suspend operation until that happens.
6463 double millis = ( endWrite - leadPointer ) * 1000.0;
6464 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6465 if ( millis < 1.0 ) millis = 1.0;
6466 Sleep( (DWORD) millis );
6467 }
6468
6469 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6470 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6471 // We've strayed into the forbidden zone ... resync the read pointer.
6472 handle->xrun[0] = true;
6473 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6474 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6475 handle->bufferPointer[0] = nextWritePointer;
6476 endWrite = nextWritePointer + bufferBytes;
6477 }
6478
6479 // Lock free space in the buffer
6480 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6481 &bufferSize1, &buffer2, &bufferSize2, 0 );
6482 if ( FAILED( result ) ) {
6483 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6484 errorText_ = errorStream_.str();
6485 MUTEX_UNLOCK( &stream_.mutex );
6486 error( RtAudioError::SYSTEM_ERROR );
6487 return;
6488 }
6489
6490 // Copy our buffer into the DS buffer
6491 CopyMemory( buffer1, buffer, bufferSize1 );
6492 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6493
6494 // Update our buffer offset and unlock sound buffer
6495 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6496 if ( FAILED( result ) ) {
6497 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6498 errorText_ = errorStream_.str();
6499 MUTEX_UNLOCK( &stream_.mutex );
6500 error( RtAudioError::SYSTEM_ERROR );
6501 return;
6502 }
6503 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6504 handle->bufferPointer[0] = nextWritePointer;
6505 }
6506
6507 // Don't bother draining input
6508 if ( handle->drainCounter ) {
6509 handle->drainCounter++;
6510 goto unlock;
6511 }
6512
6513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6514
6515 // Setup parameters.
6516 if ( stream_.doConvertBuffer[1] ) {
6517 buffer = stream_.deviceBuffer;
6518 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6519 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6520 }
6521 else {
6522 buffer = stream_.userBuffer[1];
6523 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6524 bufferBytes *= formatBytes( stream_.userFormat );
6525 }
6526
6527 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6528 long nextReadPointer = handle->bufferPointer[1];
6529 DWORD dsBufferSize = handle->dsBufferSize[1];
6530
6531 // Find out where the write and "safe read" pointers are.
6532 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6533 if ( FAILED( result ) ) {
6534 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6535 errorText_ = errorStream_.str();
6536 MUTEX_UNLOCK( &stream_.mutex );
6537 error( RtAudioError::SYSTEM_ERROR );
6538 return;
6539 }
6540
6541 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6542 DWORD endRead = nextReadPointer + bufferBytes;
6543
6544 // Handling depends on whether we are INPUT or DUPLEX.
6545 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6546 // then a wait here will drag the write pointers into the forbidden zone.
6547 //
6548 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6549 // it's in a safe position. This causes dropouts, but it seems to be the only
6550 // practical way to sync up the read and write pointers reliably, given the
6551 // the very complex relationship between phase and increment of the read and write
6552 // pointers.
6553 //
6554 // In order to minimize audible dropouts in DUPLEX mode, we will
6555 // provide a pre-roll period of 0.5 seconds in which we return
6556 // zeros from the read buffer while the pointers sync up.
6557
6558 if ( stream_.mode == DUPLEX ) {
6559 if ( safeReadPointer < endRead ) {
6560 if ( duplexPrerollBytes <= 0 ) {
6561 // Pre-roll time over. Be more agressive.
6562 int adjustment = endRead-safeReadPointer;
6563
6564 handle->xrun[1] = true;
6565 // Two cases:
6566 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6567 // and perform fine adjustments later.
6568 // - small adjustments: back off by twice as much.
6569 if ( adjustment >= 2*bufferBytes )
6570 nextReadPointer = safeReadPointer-2*bufferBytes;
6571 else
6572 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6573
6574 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6575
6576 }
6577 else {
6578 // In pre=roll time. Just do it.
6579 nextReadPointer = safeReadPointer - bufferBytes;
6580 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6581 }
6582 endRead = nextReadPointer + bufferBytes;
6583 }
6584 }
6585 else { // mode == INPUT
6586 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6587 // See comments for playback.
6588 double millis = (endRead - safeReadPointer) * 1000.0;
6589 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6590 if ( millis < 1.0 ) millis = 1.0;
6591 Sleep( (DWORD) millis );
6592
6593 // Wake up and find out where we are now.
6594 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6595 if ( FAILED( result ) ) {
6596 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6597 errorText_ = errorStream_.str();
6598 MUTEX_UNLOCK( &stream_.mutex );
6599 error( RtAudioError::SYSTEM_ERROR );
6600 return;
6601 }
6602
6603 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6604 }
6605 }
6606
6607 // Lock free space in the buffer
6608 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6609 &bufferSize1, &buffer2, &bufferSize2, 0 );
6610 if ( FAILED( result ) ) {
6611 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6612 errorText_ = errorStream_.str();
6613 MUTEX_UNLOCK( &stream_.mutex );
6614 error( RtAudioError::SYSTEM_ERROR );
6615 return;
6616 }
6617
6618 if ( duplexPrerollBytes <= 0 ) {
6619 // Copy our buffer into the DS buffer
6620 CopyMemory( buffer, buffer1, bufferSize1 );
6621 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6622 }
6623 else {
6624 memset( buffer, 0, bufferSize1 );
6625 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6626 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6627 }
6628
6629 // Update our buffer offset and unlock sound buffer
6630 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6631 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6632 if ( FAILED( result ) ) {
6633 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6634 errorText_ = errorStream_.str();
6635 MUTEX_UNLOCK( &stream_.mutex );
6636 error( RtAudioError::SYSTEM_ERROR );
6637 return;
6638 }
6639 handle->bufferPointer[1] = nextReadPointer;
6640
6641 // No byte swapping necessary in DirectSound implementation.
6642
6643 // If necessary, convert 8-bit data from unsigned to signed.
6644 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6645 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6646
6647 // Do buffer conversion if necessary.
6648 if ( stream_.doConvertBuffer[1] )
6649 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6650 }
6651
6652 unlock:
6653 MUTEX_UNLOCK( &stream_.mutex );
6654 RtApi::tickStreamTime();
6655 }
6656
6657 // Definitions for utility functions and callbacks
6658 // specific to the DirectSound implementation.
6659
callbackHandler(void * ptr)6660 static unsigned __stdcall callbackHandler( void *ptr )
6661 {
6662 CallbackInfo *info = (CallbackInfo *) ptr;
6663 RtApiDs *object = (RtApiDs *) info->object;
6664 bool* isRunning = &info->isRunning;
6665
6666 while ( *isRunning == true ) {
6667 object->callbackEvent();
6668 }
6669
6670 _endthreadex( 0 );
6671 return 0;
6672 }
6673
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6674 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6675 LPCTSTR description,
6676 LPCTSTR /*module*/,
6677 LPVOID lpContext )
6678 {
6679 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6680 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6681
6682 HRESULT hr;
6683 bool validDevice = false;
6684 if ( probeInfo.isInput == true ) {
6685 DSCCAPS caps;
6686 LPDIRECTSOUNDCAPTURE object;
6687
6688 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6689 if ( hr != DS_OK ) return TRUE;
6690
6691 caps.dwSize = sizeof(caps);
6692 hr = object->GetCaps( &caps );
6693 if ( hr == DS_OK ) {
6694 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6695 validDevice = true;
6696 }
6697 object->Release();
6698 }
6699 else {
6700 DSCAPS caps;
6701 LPDIRECTSOUND object;
6702 hr = DirectSoundCreate( lpguid, &object, NULL );
6703 if ( hr != DS_OK ) return TRUE;
6704
6705 caps.dwSize = sizeof(caps);
6706 hr = object->GetCaps( &caps );
6707 if ( hr == DS_OK ) {
6708 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6709 validDevice = true;
6710 }
6711 object->Release();
6712 }
6713
6714 // If good device, then save its name and guid.
6715 std::string name = convertCharPointerToStdString( description );
6716 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6717 if ( lpguid == NULL )
6718 name = "Default Device";
6719 if ( validDevice ) {
6720 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6721 if ( dsDevices[i].name == name ) {
6722 dsDevices[i].found = true;
6723 if ( probeInfo.isInput ) {
6724 dsDevices[i].id[1] = lpguid;
6725 dsDevices[i].validId[1] = true;
6726 }
6727 else {
6728 dsDevices[i].id[0] = lpguid;
6729 dsDevices[i].validId[0] = true;
6730 }
6731 return TRUE;
6732 }
6733 }
6734
6735 DsDevice device;
6736 device.name = name;
6737 device.found = true;
6738 if ( probeInfo.isInput ) {
6739 device.id[1] = lpguid;
6740 device.validId[1] = true;
6741 }
6742 else {
6743 device.id[0] = lpguid;
6744 device.validId[0] = true;
6745 }
6746 dsDevices.push_back( device );
6747 }
6748
6749 return TRUE;
6750 }
6751
getErrorString(int code)6752 static const char* getErrorString( int code )
6753 {
6754 switch ( code ) {
6755
6756 case DSERR_ALLOCATED:
6757 return "Already allocated";
6758
6759 case DSERR_CONTROLUNAVAIL:
6760 return "Control unavailable";
6761
6762 case DSERR_INVALIDPARAM:
6763 return "Invalid parameter";
6764
6765 case DSERR_INVALIDCALL:
6766 return "Invalid call";
6767
6768 case DSERR_GENERIC:
6769 return "Generic error";
6770
6771 case DSERR_PRIOLEVELNEEDED:
6772 return "Priority level needed";
6773
6774 case DSERR_OUTOFMEMORY:
6775 return "Out of memory";
6776
6777 case DSERR_BADFORMAT:
6778 return "The sample rate or the channel format is not supported";
6779
6780 case DSERR_UNSUPPORTED:
6781 return "Not supported";
6782
6783 case DSERR_NODRIVER:
6784 return "No driver";
6785
6786 case DSERR_ALREADYINITIALIZED:
6787 return "Already initialized";
6788
6789 case DSERR_NOAGGREGATION:
6790 return "No aggregation";
6791
6792 case DSERR_BUFFERLOST:
6793 return "Buffer lost";
6794
6795 case DSERR_OTHERAPPHASPRIO:
6796 return "Another application already has priority";
6797
6798 case DSERR_UNINITIALIZED:
6799 return "Uninitialized";
6800
6801 default:
6802 return "DirectSound unknown error";
6803 }
6804 }
6805 //******************** End of __WINDOWS_DS__ *********************//
6806 #endif
6807
6808
6809 #if defined(__LINUX_ALSA__)
6810
6811 #include <alsa/asoundlib.h>
6812 #include <unistd.h>
6813
6814 // A structure to hold various information related to the ALSA API
6815 // implementation.
// Per-stream ALSA state, stored in stream_.apiHandle by probeDeviceOpen().
struct AlsaHandle {
  snd_pcm_t *handles[2];      // PCM handles, indexed by StreamMode (one per direction)
  bool synchronized;          // true when the two handles were linked via snd_pcm_link()
  bool xrun[2];               // per-direction over/underrun indicators
  pthread_cond_t runnable_cv; // condition variable paired with the "runnable" flag
  bool runnable;              // predicate for runnable_cv — presumably guarded by the stream mutex; confirm in callback code

  // All flags start cleared; the PCM handles are filled in later by probeDeviceOpen().
  AlsaHandle()
    :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
};
6826
6827 static void *alsaCallbackHandler( void * ptr );
6828
// Default constructor: no ALSA resources are acquired until a stream is opened.
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
6833
// Destructor: ensures the stream is closed (releasing PCM handles, buffers
// and the callback thread) if the user did not call closeStream() explicitly.
RtApiAlsa :: ~RtApiAlsa()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
6838
getDeviceCount(void)6839 unsigned int RtApiAlsa :: getDeviceCount( void )
6840 {
6841 unsigned nDevices = 0;
6842 int result, subdevice, card;
6843 char name[64];
6844 snd_ctl_t *handle;
6845
6846 // Count cards and devices
6847 card = -1;
6848 snd_card_next( &card );
6849 while ( card >= 0 ) {
6850 sprintf( name, "hw:%d", card );
6851 result = snd_ctl_open( &handle, name, 0 );
6852 if ( result < 0 ) {
6853 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6854 errorText_ = errorStream_.str();
6855 error( RtAudioError::WARNING );
6856 goto nextcard;
6857 }
6858 subdevice = -1;
6859 while( 1 ) {
6860 result = snd_ctl_pcm_next_device( handle, &subdevice );
6861 if ( result < 0 ) {
6862 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6863 errorText_ = errorStream_.str();
6864 error( RtAudioError::WARNING );
6865 break;
6866 }
6867 if ( subdevice < 0 )
6868 break;
6869 nDevices++;
6870 }
6871 nextcard:
6872 snd_ctl_close( handle );
6873 snd_card_next( &card );
6874 }
6875
6876 result = snd_ctl_open( &handle, "default", 0 );
6877 if (result == 0) {
6878 nDevices++;
6879 snd_ctl_close( handle );
6880 }
6881
6882 return nDevices;
6883 }
6884
getDeviceInfo(unsigned int device)6885 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6886 {
6887 RtAudio::DeviceInfo info;
6888 info.probed = false;
6889
6890 unsigned nDevices = 0;
6891 int result, subdevice, card;
6892 char name[64];
6893 snd_ctl_t *chandle;
6894
6895 // Count cards and devices
6896 card = -1;
6897 subdevice = -1;
6898 snd_card_next( &card );
6899 while ( card >= 0 ) {
6900 sprintf( name, "hw:%d", card );
6901 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6902 if ( result < 0 ) {
6903 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6904 errorText_ = errorStream_.str();
6905 error( RtAudioError::WARNING );
6906 goto nextcard;
6907 }
6908 subdevice = -1;
6909 while( 1 ) {
6910 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6911 if ( result < 0 ) {
6912 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6913 errorText_ = errorStream_.str();
6914 error( RtAudioError::WARNING );
6915 break;
6916 }
6917 if ( subdevice < 0 ) break;
6918 if ( nDevices == device ) {
6919 sprintf( name, "hw:%d,%d", card, subdevice );
6920 goto foundDevice;
6921 }
6922 nDevices++;
6923 }
6924 nextcard:
6925 snd_ctl_close( chandle );
6926 snd_card_next( &card );
6927 }
6928
6929 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6930 if ( result == 0 ) {
6931 if ( nDevices == device ) {
6932 strcpy( name, "default" );
6933 goto foundDevice;
6934 }
6935 nDevices++;
6936 }
6937
6938 if ( nDevices == 0 ) {
6939 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6940 error( RtAudioError::INVALID_USE );
6941 return info;
6942 }
6943
6944 if ( device >= nDevices ) {
6945 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6946 error( RtAudioError::INVALID_USE );
6947 return info;
6948 }
6949
6950 foundDevice:
6951
6952 // If a stream is already open, we cannot probe the stream devices.
6953 // Thus, use the saved results.
6954 if ( stream_.state != STREAM_CLOSED &&
6955 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6956 snd_ctl_close( chandle );
6957 if ( device >= devices_.size() ) {
6958 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6959 error( RtAudioError::WARNING );
6960 return info;
6961 }
6962 return devices_[ device ];
6963 }
6964
6965 int openMode = SND_PCM_ASYNC;
6966 snd_pcm_stream_t stream;
6967 snd_pcm_info_t *pcminfo;
6968 snd_pcm_info_alloca( &pcminfo );
6969 snd_pcm_t *phandle;
6970 snd_pcm_hw_params_t *params;
6971 snd_pcm_hw_params_alloca( ¶ms );
6972
6973 // First try for playback unless default device (which has subdev -1)
6974 stream = SND_PCM_STREAM_PLAYBACK;
6975 snd_pcm_info_set_stream( pcminfo, stream );
6976 if ( subdevice != -1 ) {
6977 snd_pcm_info_set_device( pcminfo, subdevice );
6978 snd_pcm_info_set_subdevice( pcminfo, 0 );
6979
6980 result = snd_ctl_pcm_info( chandle, pcminfo );
6981 if ( result < 0 ) {
6982 // Device probably doesn't support playback.
6983 goto captureProbe;
6984 }
6985 }
6986
6987 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6988 if ( result < 0 ) {
6989 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6990 errorText_ = errorStream_.str();
6991 error( RtAudioError::WARNING );
6992 goto captureProbe;
6993 }
6994
6995 // The device is open ... fill the parameter structure.
6996 result = snd_pcm_hw_params_any( phandle, params );
6997 if ( result < 0 ) {
6998 snd_pcm_close( phandle );
6999 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7000 errorText_ = errorStream_.str();
7001 error( RtAudioError::WARNING );
7002 goto captureProbe;
7003 }
7004
7005 // Get output channel information.
7006 unsigned int value;
7007 result = snd_pcm_hw_params_get_channels_max( params, &value );
7008 if ( result < 0 ) {
7009 snd_pcm_close( phandle );
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7011 errorText_ = errorStream_.str();
7012 error( RtAudioError::WARNING );
7013 goto captureProbe;
7014 }
7015 info.outputChannels = value;
7016 snd_pcm_close( phandle );
7017
7018 captureProbe:
7019 stream = SND_PCM_STREAM_CAPTURE;
7020 snd_pcm_info_set_stream( pcminfo, stream );
7021
7022 // Now try for capture unless default device (with subdev = -1)
7023 if ( subdevice != -1 ) {
7024 result = snd_ctl_pcm_info( chandle, pcminfo );
7025 snd_ctl_close( chandle );
7026 if ( result < 0 ) {
7027 // Device probably doesn't support capture.
7028 if ( info.outputChannels == 0 ) return info;
7029 goto probeParameters;
7030 }
7031 }
7032 else
7033 snd_ctl_close( chandle );
7034
7035 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7036 if ( result < 0 ) {
7037 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7038 errorText_ = errorStream_.str();
7039 error( RtAudioError::WARNING );
7040 if ( info.outputChannels == 0 ) return info;
7041 goto probeParameters;
7042 }
7043
7044 // The device is open ... fill the parameter structure.
7045 result = snd_pcm_hw_params_any( phandle, params );
7046 if ( result < 0 ) {
7047 snd_pcm_close( phandle );
7048 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7049 errorText_ = errorStream_.str();
7050 error( RtAudioError::WARNING );
7051 if ( info.outputChannels == 0 ) return info;
7052 goto probeParameters;
7053 }
7054
7055 result = snd_pcm_hw_params_get_channels_max( params, &value );
7056 if ( result < 0 ) {
7057 snd_pcm_close( phandle );
7058 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7059 errorText_ = errorStream_.str();
7060 error( RtAudioError::WARNING );
7061 if ( info.outputChannels == 0 ) return info;
7062 goto probeParameters;
7063 }
7064 info.inputChannels = value;
7065 snd_pcm_close( phandle );
7066
7067 // If device opens for both playback and capture, we determine the channels.
7068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7070
7071 // ALSA doesn't provide default devices so we'll use the first available one.
7072 if ( device == 0 && info.outputChannels > 0 )
7073 info.isDefaultOutput = true;
7074 if ( device == 0 && info.inputChannels > 0 )
7075 info.isDefaultInput = true;
7076
7077 probeParameters:
7078 // At this point, we just need to figure out the supported data
7079 // formats and sample rates. We'll proceed by opening the device in
7080 // the direction with the maximum number of channels, or playback if
7081 // they are equal. This might limit our sample rate options, but so
7082 // be it.
7083
7084 if ( info.outputChannels >= info.inputChannels )
7085 stream = SND_PCM_STREAM_PLAYBACK;
7086 else
7087 stream = SND_PCM_STREAM_CAPTURE;
7088 snd_pcm_info_set_stream( pcminfo, stream );
7089
7090 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7091 if ( result < 0 ) {
7092 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7093 errorText_ = errorStream_.str();
7094 error( RtAudioError::WARNING );
7095 return info;
7096 }
7097
7098 // The device is open ... fill the parameter structure.
7099 result = snd_pcm_hw_params_any( phandle, params );
7100 if ( result < 0 ) {
7101 snd_pcm_close( phandle );
7102 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7103 errorText_ = errorStream_.str();
7104 error( RtAudioError::WARNING );
7105 return info;
7106 }
7107
7108 // Test our discrete set of sample rate values.
7109 info.sampleRates.clear();
7110 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7111 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7112 info.sampleRates.push_back( SAMPLE_RATES[i] );
7113
7114 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7115 info.preferredSampleRate = SAMPLE_RATES[i];
7116 }
7117 }
7118 if ( info.sampleRates.size() == 0 ) {
7119 snd_pcm_close( phandle );
7120 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7121 errorText_ = errorStream_.str();
7122 error( RtAudioError::WARNING );
7123 return info;
7124 }
7125
7126 // Probe the supported data formats ... we don't care about endian-ness just yet
7127 snd_pcm_format_t format;
7128 info.nativeFormats = 0;
7129 format = SND_PCM_FORMAT_S8;
7130 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7131 info.nativeFormats |= RTAUDIO_SINT8;
7132 format = SND_PCM_FORMAT_S16;
7133 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7134 info.nativeFormats |= RTAUDIO_SINT16;
7135 format = SND_PCM_FORMAT_S24;
7136 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7137 info.nativeFormats |= RTAUDIO_SINT24;
7138 format = SND_PCM_FORMAT_S32;
7139 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7140 info.nativeFormats |= RTAUDIO_SINT32;
7141 format = SND_PCM_FORMAT_FLOAT;
7142 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7143 info.nativeFormats |= RTAUDIO_FLOAT32;
7144 format = SND_PCM_FORMAT_FLOAT64;
7145 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7146 info.nativeFormats |= RTAUDIO_FLOAT64;
7147
7148 // Check that we have at least one supported format
7149 if ( info.nativeFormats == 0 ) {
7150 snd_pcm_close( phandle );
7151 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7152 errorText_ = errorStream_.str();
7153 error( RtAudioError::WARNING );
7154 return info;
7155 }
7156
7157 // Get the device name
7158 char *cardname;
7159 result = snd_card_get_name( card, &cardname );
7160 if ( result >= 0 ) {
7161 sprintf( name, "hw:%s,%d", cardname, subdevice );
7162 free( cardname );
7163 }
7164 info.name = name;
7165
7166 // That's all ... close the device and return
7167 snd_pcm_close( phandle );
7168 info.probed = true;
7169 return info;
7170 }
7171
saveDeviceInfo(void)7172 void RtApiAlsa :: saveDeviceInfo( void )
7173 {
7174 devices_.clear();
7175
7176 unsigned int nDevices = getDeviceCount();
7177 devices_.resize( nDevices );
7178 for ( unsigned int i=0; i<nDevices; i++ )
7179 devices_[i] = getDeviceInfo( i );
7180 }
7181
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7182 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7183 unsigned int firstChannel, unsigned int sampleRate,
7184 RtAudioFormat format, unsigned int *bufferSize,
7185 RtAudio::StreamOptions *options )
7186
7187 {
7188 #if defined(__RTAUDIO_DEBUG__)
7189 snd_output_t *out;
7190 snd_output_stdio_attach(&out, stderr, 0);
7191 #endif
7192
7193 // I'm not using the "plug" interface ... too much inconsistent behavior.
7194
7195 unsigned nDevices = 0;
7196 int result, subdevice, card;
7197 char name[64];
7198 snd_ctl_t *chandle;
7199
7200 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7201 snprintf(name, sizeof(name), "%s", "default");
7202 else {
7203 // Count cards and devices
7204 card = -1;
7205 snd_card_next( &card );
7206 while ( card >= 0 ) {
7207 sprintf( name, "hw:%d", card );
7208 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7209 if ( result < 0 ) {
7210 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7211 errorText_ = errorStream_.str();
7212 return FAILURE;
7213 }
7214 subdevice = -1;
7215 while( 1 ) {
7216 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7217 if ( result < 0 ) break;
7218 if ( subdevice < 0 ) break;
7219 if ( nDevices == device ) {
7220 sprintf( name, "hw:%d,%d", card, subdevice );
7221 snd_ctl_close( chandle );
7222 goto foundDevice;
7223 }
7224 nDevices++;
7225 }
7226 snd_ctl_close( chandle );
7227 snd_card_next( &card );
7228 }
7229
7230 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7231 if ( result == 0 ) {
7232 if ( nDevices == device ) {
7233 strcpy( name, "default" );
7234 goto foundDevice;
7235 }
7236 nDevices++;
7237 }
7238
7239 if ( nDevices == 0 ) {
7240 // This should not happen because a check is made before this function is called.
7241 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7242 return FAILURE;
7243 }
7244
7245 if ( device >= nDevices ) {
7246 // This should not happen because a check is made before this function is called.
7247 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7248 return FAILURE;
7249 }
7250 }
7251
7252 foundDevice:
7253
7254 // The getDeviceInfo() function will not work for a device that is
7255 // already open. Thus, we'll probe the system before opening a
7256 // stream and save the results for use by getDeviceInfo().
7257 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7258 this->saveDeviceInfo();
7259
7260 snd_pcm_stream_t stream;
7261 if ( mode == OUTPUT )
7262 stream = SND_PCM_STREAM_PLAYBACK;
7263 else
7264 stream = SND_PCM_STREAM_CAPTURE;
7265
7266 snd_pcm_t *phandle;
7267 int openMode = SND_PCM_ASYNC;
7268 result = snd_pcm_open( &phandle, name, stream, openMode );
7269 if ( result < 0 ) {
7270 if ( mode == OUTPUT )
7271 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7272 else
7273 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7274 errorText_ = errorStream_.str();
7275 return FAILURE;
7276 }
7277
7278 // Fill the parameter structure.
7279 snd_pcm_hw_params_t *hw_params;
7280 snd_pcm_hw_params_alloca( &hw_params );
7281 result = snd_pcm_hw_params_any( phandle, hw_params );
7282 if ( result < 0 ) {
7283 snd_pcm_close( phandle );
7284 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7285 errorText_ = errorStream_.str();
7286 return FAILURE;
7287 }
7288
7289 #if defined(__RTAUDIO_DEBUG__)
7290 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7291 snd_pcm_hw_params_dump( hw_params, out );
7292 #endif
7293
7294 // Set access ... check user preference.
7295 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7296 stream_.userInterleaved = false;
7297 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7298 if ( result < 0 ) {
7299 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7300 stream_.deviceInterleaved[mode] = true;
7301 }
7302 else
7303 stream_.deviceInterleaved[mode] = false;
7304 }
7305 else {
7306 stream_.userInterleaved = true;
7307 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7308 if ( result < 0 ) {
7309 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7310 stream_.deviceInterleaved[mode] = false;
7311 }
7312 else
7313 stream_.deviceInterleaved[mode] = true;
7314 }
7315
7316 if ( result < 0 ) {
7317 snd_pcm_close( phandle );
7318 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7319 errorText_ = errorStream_.str();
7320 return FAILURE;
7321 }
7322
7323 // Determine how to set the device format.
7324 stream_.userFormat = format;
7325 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7326
7327 if ( format == RTAUDIO_SINT8 )
7328 deviceFormat = SND_PCM_FORMAT_S8;
7329 else if ( format == RTAUDIO_SINT16 )
7330 deviceFormat = SND_PCM_FORMAT_S16;
7331 else if ( format == RTAUDIO_SINT24 )
7332 deviceFormat = SND_PCM_FORMAT_S24;
7333 else if ( format == RTAUDIO_SINT32 )
7334 deviceFormat = SND_PCM_FORMAT_S32;
7335 else if ( format == RTAUDIO_FLOAT32 )
7336 deviceFormat = SND_PCM_FORMAT_FLOAT;
7337 else if ( format == RTAUDIO_FLOAT64 )
7338 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7339
7340 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7341 stream_.deviceFormat[mode] = format;
7342 goto setFormat;
7343 }
7344
7345 // The user requested format is not natively supported by the device.
7346 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7347 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7348 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7349 goto setFormat;
7350 }
7351
7352 deviceFormat = SND_PCM_FORMAT_FLOAT;
7353 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7354 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7355 goto setFormat;
7356 }
7357
7358 deviceFormat = SND_PCM_FORMAT_S32;
7359 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7360 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7361 goto setFormat;
7362 }
7363
7364 deviceFormat = SND_PCM_FORMAT_S24;
7365 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7366 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7367 goto setFormat;
7368 }
7369
7370 deviceFormat = SND_PCM_FORMAT_S16;
7371 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7372 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7373 goto setFormat;
7374 }
7375
7376 deviceFormat = SND_PCM_FORMAT_S8;
7377 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7378 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7379 goto setFormat;
7380 }
7381
7382 // If we get here, no supported format was found.
7383 snd_pcm_close( phandle );
7384 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7385 errorText_ = errorStream_.str();
7386 return FAILURE;
7387
7388 setFormat:
7389 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7390 if ( result < 0 ) {
7391 snd_pcm_close( phandle );
7392 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7393 errorText_ = errorStream_.str();
7394 return FAILURE;
7395 }
7396
7397 // Determine whether byte-swaping is necessary.
7398 stream_.doByteSwap[mode] = false;
7399 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7400 result = snd_pcm_format_cpu_endian( deviceFormat );
7401 if ( result == 0 )
7402 stream_.doByteSwap[mode] = true;
7403 else if (result < 0) {
7404 snd_pcm_close( phandle );
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7407 return FAILURE;
7408 }
7409 }
7410
7411 // Set the sample rate.
7412 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7413 if ( result < 0 ) {
7414 snd_pcm_close( phandle );
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7416 errorText_ = errorStream_.str();
7417 return FAILURE;
7418 }
7419
7420 // Determine the number of channels for this device. We support a possible
7421 // minimum device channel number > than the value requested by the user.
7422 stream_.nUserChannels[mode] = channels;
7423 unsigned int value;
7424 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7425 unsigned int deviceChannels = value;
7426 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7427 snd_pcm_close( phandle );
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7429 errorText_ = errorStream_.str();
7430 return FAILURE;
7431 }
7432
7433 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7434 if ( result < 0 ) {
7435 snd_pcm_close( phandle );
7436 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7437 errorText_ = errorStream_.str();
7438 return FAILURE;
7439 }
7440 deviceChannels = value;
7441 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7442 stream_.nDeviceChannels[mode] = deviceChannels;
7443
7444 // Set the device channels.
7445 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7446 if ( result < 0 ) {
7447 snd_pcm_close( phandle );
7448 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7449 errorText_ = errorStream_.str();
7450 return FAILURE;
7451 }
7452
7453 // Set the buffer (or period) size.
7454 int dir = 0;
7455 snd_pcm_uframes_t periodSize = *bufferSize;
7456 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7457 if ( result < 0 ) {
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7460 errorText_ = errorStream_.str();
7461 return FAILURE;
7462 }
7463 *bufferSize = periodSize;
7464
7465 // Set the buffer number, which in ALSA is referred to as the "period".
7466 unsigned int periods = 0;
7467 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7468 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7469 if ( periods < 2 ) periods = 4; // a fairly safe default value
7470 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7471 if ( result < 0 ) {
7472 snd_pcm_close( phandle );
7473 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7474 errorText_ = errorStream_.str();
7475 return FAILURE;
7476 }
7477
7478 // If attempting to setup a duplex stream, the bufferSize parameter
7479 // MUST be the same in both directions!
7480 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7481 snd_pcm_close( phandle );
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7483 errorText_ = errorStream_.str();
7484 return FAILURE;
7485 }
7486
7487 stream_.bufferSize = *bufferSize;
7488
7489 // Install the hardware configuration
7490 result = snd_pcm_hw_params( phandle, hw_params );
7491 if ( result < 0 ) {
7492 snd_pcm_close( phandle );
7493 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7494 errorText_ = errorStream_.str();
7495 return FAILURE;
7496 }
7497
7498 #if defined(__RTAUDIO_DEBUG__)
7499 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7500 snd_pcm_hw_params_dump( hw_params, out );
7501 #endif
7502
7503 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7504 snd_pcm_sw_params_t *sw_params = NULL;
7505 snd_pcm_sw_params_alloca( &sw_params );
7506 snd_pcm_sw_params_current( phandle, sw_params );
7507 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7508 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7509 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7510
7511 // The following two settings were suggested by Theo Veenker
7512 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7513 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7514
7515 // here are two options for a fix
7516 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7517 snd_pcm_uframes_t val;
7518 snd_pcm_sw_params_get_boundary( sw_params, &val );
7519 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7520
7521 result = snd_pcm_sw_params( phandle, sw_params );
7522 if ( result < 0 ) {
7523 snd_pcm_close( phandle );
7524 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7525 errorText_ = errorStream_.str();
7526 return FAILURE;
7527 }
7528
7529 #if defined(__RTAUDIO_DEBUG__)
7530 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7531 snd_pcm_sw_params_dump( sw_params, out );
7532 #endif
7533
7534 // Set flags for buffer conversion
7535 stream_.doConvertBuffer[mode] = false;
7536 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7537 stream_.doConvertBuffer[mode] = true;
7538 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7539 stream_.doConvertBuffer[mode] = true;
7540 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7541 stream_.nUserChannels[mode] > 1 )
7542 stream_.doConvertBuffer[mode] = true;
7543
7544 // Allocate the ApiHandle if necessary and then save.
7545 AlsaHandle *apiInfo = 0;
7546 if ( stream_.apiHandle == 0 ) {
7547 try {
7548 apiInfo = (AlsaHandle *) new AlsaHandle;
7549 }
7550 catch ( std::bad_alloc& ) {
7551 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7552 goto error;
7553 }
7554
7555 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7556 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7557 goto error;
7558 }
7559
7560 stream_.apiHandle = (void *) apiInfo;
7561 apiInfo->handles[0] = 0;
7562 apiInfo->handles[1] = 0;
7563 }
7564 else {
7565 apiInfo = (AlsaHandle *) stream_.apiHandle;
7566 }
7567 apiInfo->handles[mode] = phandle;
7568 phandle = 0;
7569
7570 // Allocate necessary internal buffers.
7571 unsigned long bufferBytes;
7572 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7573 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7574 if ( stream_.userBuffer[mode] == NULL ) {
7575 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7576 goto error;
7577 }
7578
7579 if ( stream_.doConvertBuffer[mode] ) {
7580
7581 bool makeBuffer = true;
7582 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7583 if ( mode == INPUT ) {
7584 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7585 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7586 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7587 }
7588 }
7589
7590 if ( makeBuffer ) {
7591 bufferBytes *= *bufferSize;
7592 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7593 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7594 if ( stream_.deviceBuffer == NULL ) {
7595 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7596 goto error;
7597 }
7598 }
7599 }
7600
7601 stream_.sampleRate = sampleRate;
7602 stream_.nBuffers = periods;
7603 stream_.device[mode] = device;
7604 stream_.state = STREAM_STOPPED;
7605
7606 // Setup the buffer conversion information structure.
7607 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7608
7609 // Setup thread if necessary.
7610 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7611 // We had already set up an output stream.
7612 stream_.mode = DUPLEX;
7613 // Link the streams if possible.
7614 apiInfo->synchronized = false;
7615 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7616 apiInfo->synchronized = true;
7617 else {
7618 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7619 error( RtAudioError::WARNING );
7620 }
7621 }
7622 else {
7623 stream_.mode = mode;
7624
7625 // Setup callback thread.
7626 stream_.callbackInfo.object = (void *) this;
7627
7628 // Set the thread attributes for joinable and realtime scheduling
7629 // priority (optional). The higher priority will only take affect
7630 // if the program is run as root or suid. Note, under Linux
7631 // processes with CAP_SYS_NICE privilege, a user can change
7632 // scheduling policy and priority (thus need not be root). See
7633 // POSIX "capabilities".
7634 pthread_attr_t attr;
7635 pthread_attr_init( &attr );
7636 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7637
7638 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7639 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7640 // We previously attempted to increase the audio callback priority
7641 // to SCHED_RR here via the attributes. However, while no errors
7642 // were reported in doing so, it did not work. So, now this is
7643 // done in the alsaCallbackHandler function.
7644 stream_.callbackInfo.doRealtime = true;
7645 int priority = options->priority;
7646 int min = sched_get_priority_min( SCHED_RR );
7647 int max = sched_get_priority_max( SCHED_RR );
7648 if ( priority < min ) priority = min;
7649 else if ( priority > max ) priority = max;
7650 stream_.callbackInfo.priority = priority;
7651 }
7652 #endif
7653
7654 stream_.callbackInfo.isRunning = true;
7655 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7656 pthread_attr_destroy( &attr );
7657 if ( result ) {
7658 stream_.callbackInfo.isRunning = false;
7659 errorText_ = "RtApiAlsa::error creating callback thread!";
7660 goto error;
7661 }
7662 }
7663
7664 return SUCCESS;
7665
7666 error:
7667 if ( apiInfo ) {
7668 pthread_cond_destroy( &apiInfo->runnable_cv );
7669 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7670 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7671 delete apiInfo;
7672 stream_.apiHandle = 0;
7673 }
7674
7675 if ( phandle) snd_pcm_close( phandle );
7676
7677 for ( int i=0; i<2; i++ ) {
7678 if ( stream_.userBuffer[i] ) {
7679 free( stream_.userBuffer[i] );
7680 stream_.userBuffer[i] = 0;
7681 }
7682 }
7683
7684 if ( stream_.deviceBuffer ) {
7685 free( stream_.deviceBuffer );
7686 stream_.deviceBuffer = 0;
7687 }
7688
7689 stream_.state = STREAM_CLOSED;
7690 return FAILURE;
7691 }
7692
// Shut down an open ALSA stream: stop and join the callback thread, halt
// both PCM devices, close their handles, and free all stream-owned memory.
// Issues a WARNING (not an exception) if no stream is open.
void RtApiAlsa :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // Ask the callback thread to exit its processing loop.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    // The thread may be parked on the runnable condition; wake it so it can
    // observe isRunning == false and terminate.
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  // Wait for the callback thread to finish before tearing anything down.
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    // Discard any pending samples on the open direction(s)
    // ([0] = playback, [1] = capture).
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );
  }

  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Release user buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Release the shared format-conversion buffer, if any.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
7742
// Start a stopped stream: prepare the PCM device(s) when necessary, mark the
// stream running, and wake the parked callback thread.
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // A linked (synchronized) duplex pair is handled through the output device
  // above; only an unsynchronized input needs separate preparation.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Wake the callback thread even on failure; it re-checks the stream state
  // after waking and will not process audio unless state is STREAM_RUNNING.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7795
// Stop the stream gracefully: queued output samples are drained (played out)
// unless the devices are hardware-linked, pending input is dropped, and the
// callback thread is parked.
void RtApiAlsa :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the lock so the callback thread sees it
  // promptly on its next cycle.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( apiInfo->synchronized )
      result = snd_pcm_drop( handle[0] );
    else
      result = snd_pcm_drain( handle[0] );  // let pending samples play out
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );  // discard unread capture data
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7839
// Stop the stream immediately, discarding pending samples in both directions
// (snd_pcm_drop, no drain), then park the callback thread.
void RtApiAlsa :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the lock so the callback thread sees it
  // promptly on its next cycle.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = snd_pcm_drop( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // A linked (synchronized) input is handled together with the output above.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7880
// Perform one audio cycle: park while the stream is stopped, invoke the user
// callback, then read from and/or write to the PCM device(s), handling xrun
// recovery and byte-order/format conversion along the way.
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park the thread until startStream() or closeStream() signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report any xruns recorded since the previous cycle to the user callback,
  // then clear the flags.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // A callback return value of 2 aborts the stream immediately.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: capture into the device buffer when conversion to
    // the user format is required, otherwise straight into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      // (void *bufs[channels] is a variable-length array — a compiler
      // extension in C++.)
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next cycle and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Still attempt the output half of a duplex cycle after a read failure.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: one pointer per channel (VLA, see read path above).
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next cycle and re-prepare the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // A callback return value of 1 requests a graceful stop after this cycle.
  if ( doStopStream == 1 ) this->stopStream();
}
8068
alsaCallbackHandler(void * ptr)8069 static void *alsaCallbackHandler( void *ptr )
8070 {
8071 CallbackInfo *info = (CallbackInfo *) ptr;
8072 RtApiAlsa *object = (RtApiAlsa *) info->object;
8073 bool *isRunning = &info->isRunning;
8074
8075 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8076 if ( info->doRealtime ) {
8077 pthread_t tID = pthread_self(); // ID of this thread
8078 sched_param prio = { info->priority }; // scheduling priority of thread
8079 pthread_setschedparam( tID, SCHED_RR, &prio );
8080 }
8081 #endif
8082
8083 while ( *isRunning == true ) {
8084 pthread_testcancel();
8085 object->callbackEvent();
8086 }
8087
8088 pthread_exit( NULL );
8089 }
8090
8091 //******************** End of __LINUX_ALSA__ *********************//
8092 #endif
8093
8094 #if defined(__LINUX_PULSE__)
8095
8096 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8097 // and Tristan Matthews.
8098
8099 #include <pulse/error.h>
8100 #include <pulse/simple.h>
8101 #include <cstdio>
8102
// Sample rates offered by the PulseAudio backend; the list is
// zero-terminated so it can be walked with a simple pointer loop.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8105
// Associates an RtAudio sample format with its native PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};
8110
// Format pairs supported natively by this backend, terminated by a
// {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8116
// Per-stream state for the PulseAudio backend: the server connections, the
// callback thread, and the condition variable used to park that thread while
// the stream is stopped.
struct PulseAudioHandle {
  pa_simple *s_play;           // playback connection (0 when not open)
  pa_simple *s_rec;            // record connection (0 when not open)
  pthread_t thread;            // callback thread; valid only after pthread_create succeeds
  pthread_cond_t runnable_cv;  // initialized in probeDeviceOpen, not by the constructor
  bool runnable;               // true while the callback thread may process audio
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8125
~RtApiPulse()8126 RtApiPulse::~RtApiPulse()
8127 {
8128 if ( stream_.state != STREAM_CLOSED )
8129 closeStream();
8130 }
8131
getDeviceCount(void)8132 unsigned int RtApiPulse::getDeviceCount( void )
8133 {
8134 return 1;
8135 }
8136
getDeviceInfo(unsigned int)8137 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8138 {
8139 RtAudio::DeviceInfo info;
8140 info.probed = true;
8141 info.name = "PulseAudio";
8142 info.outputChannels = 2;
8143 info.inputChannels = 2;
8144 info.duplexChannels = 2;
8145 info.isDefaultOutput = true;
8146 info.isDefaultInput = true;
8147
8148 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8149 info.sampleRates.push_back( *sr );
8150
8151 info.preferredSampleRate = 48000;
8152 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8153
8154 return info;
8155 }
8156
pulseaudio_callback(void * user)8157 static void *pulseaudio_callback( void * user )
8158 {
8159 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8160 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8161 volatile bool *isRunning = &cbi->isRunning;
8162
8163 while ( *isRunning ) {
8164 pthread_testcancel();
8165 context->callbackEvent();
8166 }
8167
8168 pthread_exit( NULL );
8169 }
8170
closeStream(void)8171 void RtApiPulse::closeStream( void )
8172 {
8173 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8174
8175 stream_.callbackInfo.isRunning = false;
8176 if ( pah ) {
8177 MUTEX_LOCK( &stream_.mutex );
8178 if ( stream_.state == STREAM_STOPPED ) {
8179 pah->runnable = true;
8180 pthread_cond_signal( &pah->runnable_cv );
8181 }
8182 MUTEX_UNLOCK( &stream_.mutex );
8183
8184 pthread_join( pah->thread, 0 );
8185 if ( pah->s_play ) {
8186 pa_simple_flush( pah->s_play, NULL );
8187 pa_simple_free( pah->s_play );
8188 }
8189 if ( pah->s_rec )
8190 pa_simple_free( pah->s_rec );
8191
8192 pthread_cond_destroy( &pah->runnable_cv );
8193 delete pah;
8194 stream_.apiHandle = 0;
8195 }
8196
8197 if ( stream_.userBuffer[0] ) {
8198 free( stream_.userBuffer[0] );
8199 stream_.userBuffer[0] = 0;
8200 }
8201 if ( stream_.userBuffer[1] ) {
8202 free( stream_.userBuffer[1] );
8203 stream_.userBuffer[1] = 0;
8204 }
8205
8206 stream_.state = STREAM_CLOSED;
8207 stream_.mode = UNINITIALIZED;
8208 }
8209
// One PulseAudio processing cycle: park while the stream is stopped, run the
// user callback, then push the playback buffer to / pull the capture buffer
// from the server using the blocking pa_simple API.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Park the thread until startStream() or closeStream() signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // A callback return value of 2 aborts the stream immediately.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Select the staging buffers: the shared device buffer when format/channel
  // conversion is required, the user buffers otherwise.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // The state might have changed while waiting on the mutex.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      // Convert user data into the device buffer before writing.
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write of one buffer to the server.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer from the server.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      // Convert captured device data into the user buffer.
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // A callback return value of 1 requests a graceful stop after this cycle.
  if ( doStopStream == 1 )
    stopStream();
}
8301
// Start a stopped stream: mark it running and wake the callback thread,
// which is parked on the runnable condition variable.
void RtApiPulse::startStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::startStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiPulse::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  stream_.state = STREAM_RUNNING;

  // Wake the parked callback thread; both flag and signal happen under the
  // mutex so the wakeup cannot be missed.
  pah->runnable = true;
  pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );
}
8325
// Stop the stream gracefully: queued playback audio is drained (played out)
// before the call returns.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the lock so the callback thread parks on
  // its next cycle.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Drain lets already-written samples finish playing.
    if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;  // already set above; redundant but harmless
  MUTEX_UNLOCK( &stream_.mutex );
}
8359
// Stop the stream immediately: queued playback audio is flushed (discarded)
// rather than played out.
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the lock so the callback thread parks on
  // its next cycle.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Flush discards any samples still queued for playback.
    if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;  // already set above; redundant but harmless
  MUTEX_UNLOCK( &stream_.mutex );
}
8393
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8394 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8395 unsigned int channels, unsigned int firstChannel,
8396 unsigned int sampleRate, RtAudioFormat format,
8397 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8398 {
8399 PulseAudioHandle *pah = 0;
8400 unsigned long bufferBytes = 0;
8401 pa_sample_spec ss;
8402
8403 if ( device != 0 ) return false;
8404 if ( mode != INPUT && mode != OUTPUT ) return false;
8405 if ( channels != 1 && channels != 2 ) {
8406 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8407 return false;
8408 }
8409 ss.channels = channels;
8410
8411 if ( firstChannel != 0 ) return false;
8412
8413 bool sr_found = false;
8414 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8415 if ( sampleRate == *sr ) {
8416 sr_found = true;
8417 stream_.sampleRate = sampleRate;
8418 ss.rate = sampleRate;
8419 break;
8420 }
8421 }
8422 if ( !sr_found ) {
8423 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8424 return false;
8425 }
8426
8427 bool sf_found = 0;
8428 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8429 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8430 if ( format == sf->rtaudio_format ) {
8431 sf_found = true;
8432 stream_.userFormat = sf->rtaudio_format;
8433 stream_.deviceFormat[mode] = stream_.userFormat;
8434 ss.format = sf->pa_format;
8435 break;
8436 }
8437 }
8438 if ( !sf_found ) { // Use internal data format conversion.
8439 stream_.userFormat = format;
8440 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8441 ss.format = PA_SAMPLE_FLOAT32LE;
8442 }
8443
8444 // Set other stream parameters.
8445 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8446 else stream_.userInterleaved = true;
8447 stream_.deviceInterleaved[mode] = true;
8448 stream_.nBuffers = 1;
8449 stream_.doByteSwap[mode] = false;
8450 stream_.nUserChannels[mode] = channels;
8451 stream_.nDeviceChannels[mode] = channels + firstChannel;
8452 stream_.channelOffset[mode] = 0;
8453 std::string streamName = "RtAudio";
8454
8455 // Set flags for buffer conversion.
8456 stream_.doConvertBuffer[mode] = false;
8457 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8458 stream_.doConvertBuffer[mode] = true;
8459 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8460 stream_.doConvertBuffer[mode] = true;
8461
8462 // Allocate necessary internal buffers.
8463 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8464 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8465 if ( stream_.userBuffer[mode] == NULL ) {
8466 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8467 goto error;
8468 }
8469 stream_.bufferSize = *bufferSize;
8470
8471 if ( stream_.doConvertBuffer[mode] ) {
8472
8473 bool makeBuffer = true;
8474 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8475 if ( mode == INPUT ) {
8476 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8477 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8478 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8479 }
8480 }
8481
8482 if ( makeBuffer ) {
8483 bufferBytes *= *bufferSize;
8484 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8485 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8486 if ( stream_.deviceBuffer == NULL ) {
8487 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8488 goto error;
8489 }
8490 }
8491 }
8492
8493 stream_.device[mode] = device;
8494
8495 // Setup the buffer conversion information structure.
8496 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8497
8498 if ( !stream_.apiHandle ) {
8499 PulseAudioHandle *pah = new PulseAudioHandle;
8500 if ( !pah ) {
8501 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8502 goto error;
8503 }
8504
8505 stream_.apiHandle = pah;
8506 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8507 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8508 goto error;
8509 }
8510 }
8511 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8512
8513 int error;
8514 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8515 switch ( mode ) {
8516 case INPUT:
8517 pa_buffer_attr buffer_attr;
8518 buffer_attr.fragsize = bufferBytes;
8519 buffer_attr.maxlength = -1;
8520
8521 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8522 if ( !pah->s_rec ) {
8523 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8524 goto error;
8525 }
8526 break;
8527 case OUTPUT:
8528 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8529 if ( !pah->s_play ) {
8530 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8531 goto error;
8532 }
8533 break;
8534 default:
8535 goto error;
8536 }
8537
8538 if ( stream_.mode == UNINITIALIZED )
8539 stream_.mode = mode;
8540 else if ( stream_.mode == mode )
8541 goto error;
8542 else
8543 stream_.mode = DUPLEX;
8544
8545 if ( !stream_.callbackInfo.isRunning ) {
8546 stream_.callbackInfo.object = this;
8547 stream_.callbackInfo.isRunning = true;
8548 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8549 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8550 goto error;
8551 }
8552 }
8553
8554 stream_.state = STREAM_STOPPED;
8555 return true;
8556
8557 error:
8558 if ( pah && stream_.callbackInfo.isRunning ) {
8559 pthread_cond_destroy( &pah->runnable_cv );
8560 delete pah;
8561 stream_.apiHandle = 0;
8562 }
8563
8564 for ( int i=0; i<2; i++ ) {
8565 if ( stream_.userBuffer[i] ) {
8566 free( stream_.userBuffer[i] );
8567 stream_.userBuffer[i] = 0;
8568 }
8569 }
8570
8571 if ( stream_.deviceBuffer ) {
8572 free( stream_.deviceBuffer );
8573 stream_.deviceBuffer = 0;
8574 }
8575
8576 return FAILURE;
8577 }
8578
8579 //******************** End of __LINUX_PULSE__ *********************//
8580 #endif
8581
8582 #if defined(__LINUX_OSS__)
8583
8584 #include <unistd.h>
8585 #include <sys/ioctl.h>
8586 #include <unistd.h>
8587 #include <fcntl.h>
8588 #include <sys/soundcard.h>
8589 #include <errno.h>
8590 #include <math.h>
8591
8592 #if defined(__FreeBSD__)
8593 #define SND_DEVICE "/dev/dsp"
8594 #else
8595 #define SND_DEVICE "/dev/mixer"
8596 #endif
8597
8598 static void *ossCallbackHandler(void * ptr);
8599
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];    // device ids
  bool xrun[2];
  bool triggered;
  pthread_cond_t runnable;

  // All fields start out cleared; the condition variable itself is
  // initialized later with pthread_cond_init() in probeDeviceOpen().
  OssHandle()
    : triggered( false )
  {
    id[0] = id[1] = 0;
    xrun[0] = xrun[1] = false;
  }
};
8611
// Default constructor.  Construction requires no work; all device
// setup happens later in probeDeviceOpen().
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
8616
~RtApiOss()8617 RtApiOss :: ~RtApiOss()
8618 {
8619 if ( stream_.state != STREAM_CLOSED ) closeStream();
8620 }
8621
getDeviceCount(void)8622 unsigned int RtApiOss :: getDeviceCount( void )
8623 {
8624 int mixerfd = open( SND_DEVICE, O_RDWR, 0 );
8625 if ( mixerfd == -1 ) {
8626 errorText_ = "RtApiOss::getDeviceCount: error opening '";
8627 errorText_ += SND_DEVICE;
8628 errorText_ += "'.";
8629 error( RtAudioError::WARNING );
8630 return 0;
8631 }
8632
8633 oss_sysinfo sysinfo;
8634 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8635 close( mixerfd );
8636 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8637 error( RtAudioError::WARNING );
8638 return 0;
8639 }
8640
8641 close( mixerfd );
8642 return sysinfo.numaudios;
8643 }
8644
getDeviceInfo(unsigned int device)8645 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8646 {
8647 RtAudio::DeviceInfo info;
8648 info.probed = false;
8649
8650 int mixerfd = open( SND_DEVICE, O_RDWR, 0 );
8651 if ( mixerfd == -1 ) {
8652 errorText_ = "RtApiOss::getDeviceInfo: error opening '";
8653 errorText_ += SND_DEVICE;
8654 errorText_ += "'.";
8655 error( RtAudioError::WARNING );
8656 return info;
8657 }
8658
8659 oss_sysinfo sysinfo;
8660 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8661 if ( result == -1 ) {
8662 close( mixerfd );
8663 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8664 error( RtAudioError::WARNING );
8665 return info;
8666 }
8667
8668 unsigned nDevices = sysinfo.numaudios;
8669 if ( nDevices == 0 ) {
8670 close( mixerfd );
8671 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8672 error( RtAudioError::INVALID_USE );
8673 return info;
8674 }
8675
8676 if ( device >= nDevices ) {
8677 close( mixerfd );
8678 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8679 error( RtAudioError::INVALID_USE );
8680 return info;
8681 }
8682
8683 oss_audioinfo ainfo;
8684 ainfo.dev = device;
8685 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8686 close( mixerfd );
8687 if ( result == -1 ) {
8688 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8689 errorText_ = errorStream_.str();
8690 error( RtAudioError::WARNING );
8691 return info;
8692 }
8693
8694 // Probe channels
8695 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8696 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8697 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8698 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8699 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8700 }
8701
8702 // Probe data formats ... do for input
8703 unsigned long mask = ainfo.iformats;
8704 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8705 info.nativeFormats |= RTAUDIO_SINT16;
8706 if ( mask & AFMT_S8 )
8707 info.nativeFormats |= RTAUDIO_SINT8;
8708 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8709 info.nativeFormats |= RTAUDIO_SINT32;
8710 #if defined(AFMT_FLOAT)
8711 if ( mask & AFMT_FLOAT )
8712 info.nativeFormats |= RTAUDIO_FLOAT32;
8713 #endif
8714 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8715 info.nativeFormats |= RTAUDIO_SINT24;
8716
8717 // Check that we have at least one supported format
8718 if ( info.nativeFormats == 0 ) {
8719 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8720 errorText_ = errorStream_.str();
8721 error( RtAudioError::WARNING );
8722 return info;
8723 }
8724
8725 // Probe the supported sample rates.
8726 info.sampleRates.clear();
8727 if ( ainfo.nrates ) {
8728 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8729 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8730 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8731 info.sampleRates.push_back( SAMPLE_RATES[k] );
8732
8733 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8734 info.preferredSampleRate = SAMPLE_RATES[k];
8735
8736 break;
8737 }
8738 }
8739 }
8740 }
8741 else {
8742 // Check min and max rate values;
8743 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8744 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8745 info.sampleRates.push_back( SAMPLE_RATES[k] );
8746
8747 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8748 info.preferredSampleRate = SAMPLE_RATES[k];
8749 }
8750 }
8751 }
8752
8753 if ( info.sampleRates.size() == 0 ) {
8754 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8755 errorText_ = errorStream_.str();
8756 error( RtAudioError::WARNING );
8757 }
8758 else {
8759 info.probed = true;
8760 info.name = ainfo.name;
8761 }
8762
8763 return info;
8764 }
8765
8766
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8767 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8768 unsigned int firstChannel, unsigned int sampleRate,
8769 RtAudioFormat format, unsigned int *bufferSize,
8770 RtAudio::StreamOptions *options )
8771 {
8772 int mixerfd = open( SND_DEVICE, O_RDWR, 0 );
8773 if ( mixerfd == -1 ) {
8774 errorText_ = "RtApiOss::probeDeviceOpen: error opening '";
8775 errorText_ += SND_DEVICE;
8776 errorText_ += "'.";
8777 return FAILURE;
8778 }
8779
8780 oss_sysinfo sysinfo;
8781 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8782 if ( result == -1 ) {
8783 close( mixerfd );
8784 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8785 return FAILURE;
8786 }
8787
8788 unsigned nDevices = sysinfo.numaudios;
8789 if ( nDevices == 0 ) {
8790 // This should not happen because a check is made before this function is called.
8791 close( mixerfd );
8792 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8793 return FAILURE;
8794 }
8795
8796 if ( device >= nDevices ) {
8797 // This should not happen because a check is made before this function is called.
8798 close( mixerfd );
8799 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8800 return FAILURE;
8801 }
8802
8803 oss_audioinfo ainfo;
8804 #if defined(__FreeBSD__)
8805 ainfo.dev = -1; // specify -1 to get default device
8806 #else
8807 ainfo.dev = device;
8808 #endif
8809 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8810 close( mixerfd );
8811 if ( result == -1 ) {
8812 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8813 errorText_ = errorStream_.str();
8814 return FAILURE;
8815 }
8816
8817 // Check if device supports input or output
8818 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8819 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8820 if ( mode == OUTPUT )
8821 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8822 else
8823 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8824 errorText_ = errorStream_.str();
8825 return FAILURE;
8826 }
8827
8828 int flags = 0;
8829 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8830 if ( mode == OUTPUT )
8831 flags |= O_WRONLY;
8832 else { // mode == INPUT
8833 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8834 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8835 close( handle->id[0] );
8836 handle->id[0] = 0;
8837 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8838 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8839 errorText_ = errorStream_.str();
8840 return FAILURE;
8841 }
8842 // Check that the number previously set channels is the same.
8843 if ( stream_.nUserChannels[0] != channels ) {
8844 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8845 errorText_ = errorStream_.str();
8846 return FAILURE;
8847 }
8848 flags |= O_RDWR;
8849 }
8850 else
8851 flags |= O_RDONLY;
8852 }
8853
8854 // Set exclusive access if specified.
8855 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8856
8857 // Try to open the device.
8858 int fd;
8859 fd = open( ainfo.devnode, flags, 0 );
8860 if ( fd == -1 ) {
8861 if ( errno == EBUSY )
8862 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8863 else
8864 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8865 errorText_ = errorStream_.str();
8866 return FAILURE;
8867 }
8868
8869 // For duplex operation, specifically set this mode (this doesn't seem to work).
8870 /*
8871 if ( flags | O_RDWR ) {
8872 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8873 if ( result == -1) {
8874 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8875 errorText_ = errorStream_.str();
8876 return FAILURE;
8877 }
8878 }
8879 */
8880
8881 // Check the device channel support.
8882 stream_.nUserChannels[mode] = channels;
8883 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8884 close( fd );
8885 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8886 errorText_ = errorStream_.str();
8887 return FAILURE;
8888 }
8889
8890 // Set the number of channels.
8891 int deviceChannels = channels + firstChannel;
8892 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8893 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8894 close( fd );
8895 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8896 errorText_ = errorStream_.str();
8897 return FAILURE;
8898 }
8899 stream_.nDeviceChannels[mode] = deviceChannels;
8900
8901 // Get the data format mask
8902 int mask;
8903 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8904 if ( result == -1 ) {
8905 close( fd );
8906 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8907 errorText_ = errorStream_.str();
8908 return FAILURE;
8909 }
8910
8911 // Determine how to set the device format.
8912 stream_.userFormat = format;
8913 int deviceFormat = -1;
8914 stream_.doByteSwap[mode] = false;
8915 if ( format == RTAUDIO_SINT8 ) {
8916 if ( mask & AFMT_S8 ) {
8917 deviceFormat = AFMT_S8;
8918 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8919 }
8920 }
8921 else if ( format == RTAUDIO_SINT16 ) {
8922 if ( mask & AFMT_S16_NE ) {
8923 deviceFormat = AFMT_S16_NE;
8924 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8925 }
8926 else if ( mask & AFMT_S16_OE ) {
8927 deviceFormat = AFMT_S16_OE;
8928 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8929 stream_.doByteSwap[mode] = true;
8930 }
8931 }
8932 else if ( format == RTAUDIO_SINT24 ) {
8933 if ( mask & AFMT_S24_NE ) {
8934 deviceFormat = AFMT_S24_NE;
8935 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8936 }
8937 else if ( mask & AFMT_S24_OE ) {
8938 deviceFormat = AFMT_S24_OE;
8939 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8940 stream_.doByteSwap[mode] = true;
8941 }
8942 }
8943 else if ( format == RTAUDIO_SINT32 ) {
8944 if ( mask & AFMT_S32_NE ) {
8945 deviceFormat = AFMT_S32_NE;
8946 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8947 }
8948 else if ( mask & AFMT_S32_OE ) {
8949 deviceFormat = AFMT_S32_OE;
8950 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8951 stream_.doByteSwap[mode] = true;
8952 }
8953 }
8954
8955 if ( deviceFormat == -1 ) {
8956 // The user requested format is not natively supported by the device.
8957 if ( mask & AFMT_S16_NE ) {
8958 deviceFormat = AFMT_S16_NE;
8959 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8960 }
8961 else if ( mask & AFMT_S32_NE ) {
8962 deviceFormat = AFMT_S32_NE;
8963 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8964 }
8965 else if ( mask & AFMT_S24_NE ) {
8966 deviceFormat = AFMT_S24_NE;
8967 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8968 }
8969 else if ( mask & AFMT_S16_OE ) {
8970 deviceFormat = AFMT_S16_OE;
8971 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8972 stream_.doByteSwap[mode] = true;
8973 }
8974 else if ( mask & AFMT_S32_OE ) {
8975 deviceFormat = AFMT_S32_OE;
8976 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8977 stream_.doByteSwap[mode] = true;
8978 }
8979 else if ( mask & AFMT_S24_OE ) {
8980 deviceFormat = AFMT_S24_OE;
8981 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8982 stream_.doByteSwap[mode] = true;
8983 }
8984 else if ( mask & AFMT_S8) {
8985 deviceFormat = AFMT_S8;
8986 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8987 }
8988 }
8989
8990 if ( stream_.deviceFormat[mode] == 0 ) {
8991 // This really shouldn't happen ...
8992 close( fd );
8993 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8994 errorText_ = errorStream_.str();
8995 return FAILURE;
8996 }
8997
8998 // Set the data format.
8999 int temp = deviceFormat;
9000 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9001 if ( result == -1 || deviceFormat != temp ) {
9002 close( fd );
9003 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9004 errorText_ = errorStream_.str();
9005 return FAILURE;
9006 }
9007
9008 // Attempt to set the buffer size. According to OSS, the minimum
9009 // number of buffers is two. The supposed minimum buffer size is 16
9010 // bytes, so that will be our lower bound. The argument to this
9011 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9012 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9013 // We'll check the actual value used near the end of the setup
9014 // procedure.
9015 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9016 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9017 int buffers = 0;
9018 if ( options ) buffers = options->numberOfBuffers;
9019 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9020 if ( buffers < 2 ) buffers = 3;
9021 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9022 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9023 if ( result == -1 ) {
9024 close( fd );
9025 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9026 errorText_ = errorStream_.str();
9027 return FAILURE;
9028 }
9029 stream_.nBuffers = buffers;
9030
9031 // Save buffer size (in sample frames).
9032 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9033 stream_.bufferSize = *bufferSize;
9034
9035 // Set the sample rate.
9036 int srate = sampleRate;
9037 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9038 if ( result == -1 ) {
9039 close( fd );
9040 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9041 errorText_ = errorStream_.str();
9042 return FAILURE;
9043 }
9044
9045 // Verify the sample rate setup worked.
9046 if ( abs( srate - (int)sampleRate ) > 100 ) {
9047 close( fd );
9048 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9049 errorText_ = errorStream_.str();
9050 return FAILURE;
9051 }
9052 stream_.sampleRate = sampleRate;
9053
9054 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9055 // We're doing duplex setup here.
9056 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9057 stream_.nDeviceChannels[0] = deviceChannels;
9058 }
9059
9060 // Set interleaving parameters.
9061 stream_.userInterleaved = true;
9062 stream_.deviceInterleaved[mode] = true;
9063 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9064 stream_.userInterleaved = false;
9065
9066 // Set flags for buffer conversion
9067 stream_.doConvertBuffer[mode] = false;
9068 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9069 stream_.doConvertBuffer[mode] = true;
9070 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9071 stream_.doConvertBuffer[mode] = true;
9072 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9073 stream_.nUserChannels[mode] > 1 )
9074 stream_.doConvertBuffer[mode] = true;
9075
9076 // Allocate the stream handles if necessary and then save.
9077 if ( stream_.apiHandle == 0 ) {
9078 try {
9079 handle = new OssHandle;
9080 }
9081 catch ( std::bad_alloc& ) {
9082 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9083 goto error;
9084 }
9085
9086 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9087 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9088 goto error;
9089 }
9090
9091 stream_.apiHandle = (void *) handle;
9092 }
9093 else {
9094 handle = (OssHandle *) stream_.apiHandle;
9095 }
9096 handle->id[mode] = fd;
9097
9098 // Allocate necessary internal buffers.
9099 unsigned long bufferBytes;
9100 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9101 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9102 if ( stream_.userBuffer[mode] == NULL ) {
9103 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9104 goto error;
9105 }
9106
9107 if ( stream_.doConvertBuffer[mode] ) {
9108
9109 bool makeBuffer = true;
9110 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9111 if ( mode == INPUT ) {
9112 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9113 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9114 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9115 }
9116 }
9117
9118 if ( makeBuffer ) {
9119 bufferBytes *= *bufferSize;
9120 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9121 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9122 if ( stream_.deviceBuffer == NULL ) {
9123 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9124 goto error;
9125 }
9126 }
9127 }
9128
9129 stream_.device[mode] = device;
9130 stream_.state = STREAM_STOPPED;
9131
9132 // Setup the buffer conversion information structure.
9133 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9134
9135 // Setup thread if necessary.
9136 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9137 // We had already set up an output stream.
9138 stream_.mode = DUPLEX;
9139 if ( stream_.device[0] == device ) handle->id[0] = fd;
9140 }
9141 else {
9142 stream_.mode = mode;
9143
9144 // Setup callback thread.
9145 stream_.callbackInfo.object = (void *) this;
9146
9147 // Set the thread attributes for joinable and realtime scheduling
9148 // priority. The higher priority will only take affect if the
9149 // program is run as root or suid.
9150 pthread_attr_t attr;
9151 pthread_attr_init( &attr );
9152 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9153 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9154 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9155 struct sched_param param;
9156 int priority = options->priority;
9157 int min = sched_get_priority_min( SCHED_RR );
9158 int max = sched_get_priority_max( SCHED_RR );
9159 if ( priority < min ) priority = min;
9160 else if ( priority > max ) priority = max;
9161 param.sched_priority = priority;
9162 pthread_attr_setschedparam( &attr, ¶m );
9163 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9164 }
9165 else
9166 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9167 #else
9168 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9169 #endif
9170
9171 stream_.callbackInfo.isRunning = true;
9172 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9173 pthread_attr_destroy( &attr );
9174 if ( result ) {
9175 stream_.callbackInfo.isRunning = false;
9176 errorText_ = "RtApiOss::error creating callback thread!";
9177 goto error;
9178 }
9179 }
9180
9181 return SUCCESS;
9182
9183 error:
9184 if ( handle ) {
9185 pthread_cond_destroy( &handle->runnable );
9186 if ( handle->id[0] ) close( handle->id[0] );
9187 if ( handle->id[1] ) close( handle->id[1] );
9188 delete handle;
9189 stream_.apiHandle = 0;
9190 }
9191
9192 for ( int i=0; i<2; i++ ) {
9193 if ( stream_.userBuffer[i] ) {
9194 free( stream_.userBuffer[i] );
9195 stream_.userBuffer[i] = 0;
9196 }
9197 }
9198
9199 if ( stream_.deviceBuffer ) {
9200 free( stream_.deviceBuffer );
9201 stream_.deviceBuffer = 0;
9202 }
9203
9204 return FAILURE;
9205 }
9206
// Close the stream: stop the callback thread, halt the device(s),
// release the OSS handle (closing its descriptors) and free all
// internal buffers.  Issues a warning if no stream is open.
void RtApiOss :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Clear the running flag first so the callback thread's loop exits,
  // then wake it if it is blocked on the "runnable" condition (which it
  // is while the stream is stopped), and join it.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // Halt any device that is still running before closing it.
  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9254
startStream()9255 void RtApiOss :: startStream()
9256 {
9257 verifyStream();
9258 if ( stream_.state == STREAM_RUNNING ) {
9259 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9260 error( RtAudioError::WARNING );
9261 return;
9262 }
9263
9264 MUTEX_LOCK( &stream_.mutex );
9265
9266 stream_.state = STREAM_RUNNING;
9267
9268 // No need to do anything else here ... OSS automatically starts
9269 // when fed samples.
9270
9271 MUTEX_UNLOCK( &stream_.mutex );
9272
9273 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9274 pthread_cond_signal( &handle->runnable );
9275 }
9276
// Stop the stream, letting queued output drain: the output queue is
// flushed with zero buffers before the device is halted so pending
// audio plays out as silence rather than being cut off.
void RtApiOss :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Zero whichever buffer is actually written to the device.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Write nBuffers+1 zero-filled buffers to overwrite every queued fragment.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex mode shares a single descriptor.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  // error() may throw, so it is invoked only after the mutex is released.
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9348
// Abort the stream: halt the device(s) immediately, discarding any
// queued output (unlike stopStream(), which drains it first).
void RtApiOss :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex mode shares a single descriptor.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  // error() may throw, so it is invoked only after the mutex is released.
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9394
// One iteration of the callback thread: wait while stopped, invoke the
// user callback to fill/consume the user buffers, then perform the
// write (output) and read (input) to/from the OSS device, including any
// format conversion and byte swapping.
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream()/closeStream() signals the condition.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any over/underflow flags recorded by earlier iterations.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return value of 2 means abort immediately without draining.
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex buffer: prime the output while triggering is
      // disabled, then enable input and output simultaneously so both
      // directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // A return value of 1 requests a graceful (draining) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
9526
ossCallbackHandler(void * ptr)9527 static void *ossCallbackHandler( void *ptr )
9528 {
9529 CallbackInfo *info = (CallbackInfo *) ptr;
9530 RtApiOss *object = (RtApiOss *) info->object;
9531 bool *isRunning = &info->isRunning;
9532
9533 while ( *isRunning == true ) {
9534 pthread_testcancel();
9535 object->callbackEvent();
9536 }
9537
9538 pthread_exit( NULL );
9539 }
9540
9541 //******************** End of __LINUX_OSS__ *********************//
9542 #endif
9543
9544
9545 // *************************************************** //
9546 //
9547 // Protected common (OS-independent) RtAudio methods.
9548 //
9549 // *************************************************** //
9550
9551 // This method can be modified to control the behavior of error
9552 // message printing.
error(RtAudioError::Type type)9553 void RtApi :: error( RtAudioError::Type type )
9554 {
9555 errorStream_.str(""); // clear the ostringstream
9556
9557 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9558 if ( errorCallback ) {
9559 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9560
9561 if ( firstErrorOccurred_ )
9562 return;
9563
9564 firstErrorOccurred_ = true;
9565 const std::string errorMessage = errorText_;
9566
9567 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9568 stream_.callbackInfo.isRunning = false; // exit from the thread
9569 abortStream();
9570 }
9571
9572 errorCallback( type, errorMessage );
9573 firstErrorOccurred_ = false;
9574 return;
9575 }
9576
9577 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9578 std::cerr << '\n' << errorText_ << "\n\n";
9579 else if ( type != RtAudioError::WARNING )
9580 throw( RtAudioError( errorText_, type ) );
9581 }
9582
verifyStream()9583 void RtApi :: verifyStream()
9584 {
9585 if ( stream_.state == STREAM_CLOSED ) {
9586 errorText_ = "RtApi:: a stream is not open!";
9587 error( RtAudioError::INVALID_USE );
9588 }
9589 }
9590
clearStreamInfo()9591 void RtApi :: clearStreamInfo()
9592 {
9593 stream_.mode = UNINITIALIZED;
9594 stream_.state = STREAM_CLOSED;
9595 stream_.sampleRate = 0;
9596 stream_.bufferSize = 0;
9597 stream_.nBuffers = 0;
9598 stream_.userFormat = 0;
9599 stream_.userInterleaved = true;
9600 stream_.streamTime = 0.0;
9601 stream_.apiHandle = 0;
9602 stream_.deviceBuffer = 0;
9603 stream_.callbackInfo.callback = 0;
9604 stream_.callbackInfo.userData = 0;
9605 stream_.callbackInfo.isRunning = false;
9606 stream_.callbackInfo.errorCallback = 0;
9607 for ( int i=0; i<2; i++ ) {
9608 stream_.device[i] = 11111;
9609 stream_.doConvertBuffer[i] = false;
9610 stream_.deviceInterleaved[i] = true;
9611 stream_.doByteSwap[i] = false;
9612 stream_.nUserChannels[i] = 0;
9613 stream_.nDeviceChannels[i] = 0;
9614 stream_.channelOffset[i] = 0;
9615 stream_.deviceFormat[i] = 0;
9616 stream_.latency[i] = 0;
9617 stream_.userBuffer[i] = 0;
9618 stream_.convertInfo[i].channels = 0;
9619 stream_.convertInfo[i].inJump = 0;
9620 stream_.convertInfo[i].outJump = 0;
9621 stream_.convertInfo[i].inFormat = 0;
9622 stream_.convertInfo[i].outFormat = 0;
9623 stream_.convertInfo[i].inOffset.clear();
9624 stream_.convertInfo[i].outOffset.clear();
9625 }
9626 }
9627
formatBytes(RtAudioFormat format)9628 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9629 {
9630 if ( format == RTAUDIO_SINT16 )
9631 return 2;
9632 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9633 return 4;
9634 else if ( format == RTAUDIO_FLOAT64 )
9635 return 8;
9636 else if ( format == RTAUDIO_SINT24 )
9637 return 3;
9638 else if ( format == RTAUDIO_SINT8 )
9639 return 1;
9640
9641 errorText_ = "RtApi::formatBytes: undefined format.";
9642 error( RtAudioError::WARNING );
9643
9644 return 0;
9645 }
9646
// Build stream_.convertInfo[mode]: the jump sizes, channel count, formats,
// and per-channel offsets that convertBuffer() uses when the user and
// device buffers differ in format, channel count, or interleaving.
//
// mode         : OUTPUT (user -> device) or INPUT (device -> user); it also
//                doubles as the 0/1 subscript into convertInfo[] — presumably
//                OUTPUT == 0 and INPUT == 1, matching the hard-coded [0]/[1]
//                subscripts below (enum declared elsewhere — confirm).
// firstChannel : index of the first device channel to use; shifts the
//                offsets on the device side of the conversion.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have in common.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source -> interleaved destination: source channels
      // are bufferSize apart, destination channels are adjacent.  The
      // per-frame jump on the non-interleaved side is 1 sample.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source -> non-interleaved destination (mirror case).
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      // Both interleaved: channel k sits at offset k in each frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both non-interleaved: planar layout on both sides, jump of 1.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset: shift the device-side offsets so the conversion
  // starts at firstChannel (samples for interleaved, planes for planar).
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
9726
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)9727 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9728 {
9729 // This function does format conversion, input/output channel compensation, and
9730 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9731 // the lower three bytes of a 32-bit integer.
9732
9733 // Clear our device buffer when in/out duplex device channels are different
9734 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9735 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9736 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9737
9738 int j;
9739 if (info.outFormat == RTAUDIO_FLOAT64) {
9740 Float64 scale;
9741 Float64 *out = (Float64 *)outBuffer;
9742
9743 if (info.inFormat == RTAUDIO_SINT8) {
9744 signed char *in = (signed char *)inBuffer;
9745 scale = 1.0 / 127.5;
9746 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9747 for (j=0; j<info.channels; j++) {
9748 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9749 out[info.outOffset[j]] += 0.5;
9750 out[info.outOffset[j]] *= scale;
9751 }
9752 in += info.inJump;
9753 out += info.outJump;
9754 }
9755 }
9756 else if (info.inFormat == RTAUDIO_SINT16) {
9757 Int16 *in = (Int16 *)inBuffer;
9758 scale = 1.0 / 32767.5;
9759 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9760 for (j=0; j<info.channels; j++) {
9761 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9762 out[info.outOffset[j]] += 0.5;
9763 out[info.outOffset[j]] *= scale;
9764 }
9765 in += info.inJump;
9766 out += info.outJump;
9767 }
9768 }
9769 else if (info.inFormat == RTAUDIO_SINT24) {
9770 Int24 *in = (Int24 *)inBuffer;
9771 scale = 1.0 / 8388607.5;
9772 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9773 for (j=0; j<info.channels; j++) {
9774 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9775 out[info.outOffset[j]] += 0.5;
9776 out[info.outOffset[j]] *= scale;
9777 }
9778 in += info.inJump;
9779 out += info.outJump;
9780 }
9781 }
9782 else if (info.inFormat == RTAUDIO_SINT32) {
9783 Int32 *in = (Int32 *)inBuffer;
9784 scale = 1.0 / 2147483647.5;
9785 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9786 for (j=0; j<info.channels; j++) {
9787 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9788 out[info.outOffset[j]] += 0.5;
9789 out[info.outOffset[j]] *= scale;
9790 }
9791 in += info.inJump;
9792 out += info.outJump;
9793 }
9794 }
9795 else if (info.inFormat == RTAUDIO_FLOAT32) {
9796 Float32 *in = (Float32 *)inBuffer;
9797 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9798 for (j=0; j<info.channels; j++) {
9799 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9800 }
9801 in += info.inJump;
9802 out += info.outJump;
9803 }
9804 }
9805 else if (info.inFormat == RTAUDIO_FLOAT64) {
9806 // Channel compensation and/or (de)interleaving only.
9807 Float64 *in = (Float64 *)inBuffer;
9808 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9809 for (j=0; j<info.channels; j++) {
9810 out[info.outOffset[j]] = in[info.inOffset[j]];
9811 }
9812 in += info.inJump;
9813 out += info.outJump;
9814 }
9815 }
9816 }
9817 else if (info.outFormat == RTAUDIO_FLOAT32) {
9818 Float32 scale;
9819 Float32 *out = (Float32 *)outBuffer;
9820
9821 if (info.inFormat == RTAUDIO_SINT8) {
9822 signed char *in = (signed char *)inBuffer;
9823 scale = (Float32) ( 1.0 / 127.5 );
9824 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9825 for (j=0; j<info.channels; j++) {
9826 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9827 out[info.outOffset[j]] += 0.5;
9828 out[info.outOffset[j]] *= scale;
9829 }
9830 in += info.inJump;
9831 out += info.outJump;
9832 }
9833 }
9834 else if (info.inFormat == RTAUDIO_SINT16) {
9835 Int16 *in = (Int16 *)inBuffer;
9836 scale = (Float32) ( 1.0 / 32767.5 );
9837 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9838 for (j=0; j<info.channels; j++) {
9839 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9840 out[info.outOffset[j]] += 0.5;
9841 out[info.outOffset[j]] *= scale;
9842 }
9843 in += info.inJump;
9844 out += info.outJump;
9845 }
9846 }
9847 else if (info.inFormat == RTAUDIO_SINT24) {
9848 Int24 *in = (Int24 *)inBuffer;
9849 scale = (Float32) ( 1.0 / 8388607.5 );
9850 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9851 for (j=0; j<info.channels; j++) {
9852 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9853 out[info.outOffset[j]] += 0.5;
9854 out[info.outOffset[j]] *= scale;
9855 }
9856 in += info.inJump;
9857 out += info.outJump;
9858 }
9859 }
9860 else if (info.inFormat == RTAUDIO_SINT32) {
9861 Int32 *in = (Int32 *)inBuffer;
9862 scale = (Float32) ( 1.0 / 2147483647.5 );
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9864 for (j=0; j<info.channels; j++) {
9865 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9866 out[info.outOffset[j]] += 0.5;
9867 out[info.outOffset[j]] *= scale;
9868 }
9869 in += info.inJump;
9870 out += info.outJump;
9871 }
9872 }
9873 else if (info.inFormat == RTAUDIO_FLOAT32) {
9874 // Channel compensation and/or (de)interleaving only.
9875 Float32 *in = (Float32 *)inBuffer;
9876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9877 for (j=0; j<info.channels; j++) {
9878 out[info.outOffset[j]] = in[info.inOffset[j]];
9879 }
9880 in += info.inJump;
9881 out += info.outJump;
9882 }
9883 }
9884 else if (info.inFormat == RTAUDIO_FLOAT64) {
9885 Float64 *in = (Float64 *)inBuffer;
9886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9887 for (j=0; j<info.channels; j++) {
9888 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9889 }
9890 in += info.inJump;
9891 out += info.outJump;
9892 }
9893 }
9894 }
9895 else if (info.outFormat == RTAUDIO_SINT32) {
9896 Int32 *out = (Int32 *)outBuffer;
9897 if (info.inFormat == RTAUDIO_SINT8) {
9898 signed char *in = (signed char *)inBuffer;
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9900 for (j=0; j<info.channels; j++) {
9901 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9902 out[info.outOffset[j]] <<= 24;
9903 }
9904 in += info.inJump;
9905 out += info.outJump;
9906 }
9907 }
9908 else if (info.inFormat == RTAUDIO_SINT16) {
9909 Int16 *in = (Int16 *)inBuffer;
9910 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9911 for (j=0; j<info.channels; j++) {
9912 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9913 out[info.outOffset[j]] <<= 16;
9914 }
9915 in += info.inJump;
9916 out += info.outJump;
9917 }
9918 }
9919 else if (info.inFormat == RTAUDIO_SINT24) {
9920 Int24 *in = (Int24 *)inBuffer;
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9922 for (j=0; j<info.channels; j++) {
9923 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9924 out[info.outOffset[j]] <<= 8;
9925 }
9926 in += info.inJump;
9927 out += info.outJump;
9928 }
9929 }
9930 else if (info.inFormat == RTAUDIO_SINT32) {
9931 // Channel compensation and/or (de)interleaving only.
9932 Int32 *in = (Int32 *)inBuffer;
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9934 for (j=0; j<info.channels; j++) {
9935 out[info.outOffset[j]] = in[info.inOffset[j]];
9936 }
9937 in += info.inJump;
9938 out += info.outJump;
9939 }
9940 }
9941 else if (info.inFormat == RTAUDIO_FLOAT32) {
9942 Float32 *in = (Float32 *)inBuffer;
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9944 for (j=0; j<info.channels; j++) {
9945 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9946 }
9947 in += info.inJump;
9948 out += info.outJump;
9949 }
9950 }
9951 else if (info.inFormat == RTAUDIO_FLOAT64) {
9952 Float64 *in = (Float64 *)inBuffer;
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9954 for (j=0; j<info.channels; j++) {
9955 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9956 }
9957 in += info.inJump;
9958 out += info.outJump;
9959 }
9960 }
9961 }
9962 else if (info.outFormat == RTAUDIO_SINT24) {
9963 Int24 *out = (Int24 *)outBuffer;
9964 if (info.inFormat == RTAUDIO_SINT8) {
9965 signed char *in = (signed char *)inBuffer;
9966 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9967 for (j=0; j<info.channels; j++) {
9968 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9969 //out[info.outOffset[j]] <<= 16;
9970 }
9971 in += info.inJump;
9972 out += info.outJump;
9973 }
9974 }
9975 else if (info.inFormat == RTAUDIO_SINT16) {
9976 Int16 *in = (Int16 *)inBuffer;
9977 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9978 for (j=0; j<info.channels; j++) {
9979 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9980 //out[info.outOffset[j]] <<= 8;
9981 }
9982 in += info.inJump;
9983 out += info.outJump;
9984 }
9985 }
9986 else if (info.inFormat == RTAUDIO_SINT24) {
9987 // Channel compensation and/or (de)interleaving only.
9988 Int24 *in = (Int24 *)inBuffer;
9989 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9990 for (j=0; j<info.channels; j++) {
9991 out[info.outOffset[j]] = in[info.inOffset[j]];
9992 }
9993 in += info.inJump;
9994 out += info.outJump;
9995 }
9996 }
9997 else if (info.inFormat == RTAUDIO_SINT32) {
9998 Int32 *in = (Int32 *)inBuffer;
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10000 for (j=0; j<info.channels; j++) {
10001 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10002 //out[info.outOffset[j]] >>= 8;
10003 }
10004 in += info.inJump;
10005 out += info.outJump;
10006 }
10007 }
10008 else if (info.inFormat == RTAUDIO_FLOAT32) {
10009 Float32 *in = (Float32 *)inBuffer;
10010 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10011 for (j=0; j<info.channels; j++) {
10012 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10013 }
10014 in += info.inJump;
10015 out += info.outJump;
10016 }
10017 }
10018 else if (info.inFormat == RTAUDIO_FLOAT64) {
10019 Float64 *in = (Float64 *)inBuffer;
10020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10021 for (j=0; j<info.channels; j++) {
10022 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10023 }
10024 in += info.inJump;
10025 out += info.outJump;
10026 }
10027 }
10028 }
10029 else if (info.outFormat == RTAUDIO_SINT16) {
10030 Int16 *out = (Int16 *)outBuffer;
10031 if (info.inFormat == RTAUDIO_SINT8) {
10032 signed char *in = (signed char *)inBuffer;
10033 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10034 for (j=0; j<info.channels; j++) {
10035 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10036 out[info.outOffset[j]] <<= 8;
10037 }
10038 in += info.inJump;
10039 out += info.outJump;
10040 }
10041 }
10042 else if (info.inFormat == RTAUDIO_SINT16) {
10043 // Channel compensation and/or (de)interleaving only.
10044 Int16 *in = (Int16 *)inBuffer;
10045 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10046 for (j=0; j<info.channels; j++) {
10047 out[info.outOffset[j]] = in[info.inOffset[j]];
10048 }
10049 in += info.inJump;
10050 out += info.outJump;
10051 }
10052 }
10053 else if (info.inFormat == RTAUDIO_SINT24) {
10054 Int24 *in = (Int24 *)inBuffer;
10055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10056 for (j=0; j<info.channels; j++) {
10057 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10058 }
10059 in += info.inJump;
10060 out += info.outJump;
10061 }
10062 }
10063 else if (info.inFormat == RTAUDIO_SINT32) {
10064 Int32 *in = (Int32 *)inBuffer;
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10066 for (j=0; j<info.channels; j++) {
10067 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10068 }
10069 in += info.inJump;
10070 out += info.outJump;
10071 }
10072 }
10073 else if (info.inFormat == RTAUDIO_FLOAT32) {
10074 Float32 *in = (Float32 *)inBuffer;
10075 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10076 for (j=0; j<info.channels; j++) {
10077 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10078 }
10079 in += info.inJump;
10080 out += info.outJump;
10081 }
10082 }
10083 else if (info.inFormat == RTAUDIO_FLOAT64) {
10084 Float64 *in = (Float64 *)inBuffer;
10085 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10086 for (j=0; j<info.channels; j++) {
10087 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10088 }
10089 in += info.inJump;
10090 out += info.outJump;
10091 }
10092 }
10093 }
10094 else if (info.outFormat == RTAUDIO_SINT8) {
10095 signed char *out = (signed char *)outBuffer;
10096 if (info.inFormat == RTAUDIO_SINT8) {
10097 // Channel compensation and/or (de)interleaving only.
10098 signed char *in = (signed char *)inBuffer;
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = in[info.inOffset[j]];
10102 }
10103 in += info.inJump;
10104 out += info.outJump;
10105 }
10106 }
10107 if (info.inFormat == RTAUDIO_SINT16) {
10108 Int16 *in = (Int16 *)inBuffer;
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10110 for (j=0; j<info.channels; j++) {
10111 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10112 }
10113 in += info.inJump;
10114 out += info.outJump;
10115 }
10116 }
10117 else if (info.inFormat == RTAUDIO_SINT24) {
10118 Int24 *in = (Int24 *)inBuffer;
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10120 for (j=0; j<info.channels; j++) {
10121 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10122 }
10123 in += info.inJump;
10124 out += info.outJump;
10125 }
10126 }
10127 else if (info.inFormat == RTAUDIO_SINT32) {
10128 Int32 *in = (Int32 *)inBuffer;
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10130 for (j=0; j<info.channels; j++) {
10131 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10132 }
10133 in += info.inJump;
10134 out += info.outJump;
10135 }
10136 }
10137 else if (info.inFormat == RTAUDIO_FLOAT32) {
10138 Float32 *in = (Float32 *)inBuffer;
10139 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10140 for (j=0; j<info.channels; j++) {
10141 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10142 }
10143 in += info.inJump;
10144 out += info.outJump;
10145 }
10146 }
10147 else if (info.inFormat == RTAUDIO_FLOAT64) {
10148 Float64 *in = (Float64 *)inBuffer;
10149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10150 for (j=0; j<info.channels; j++) {
10151 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10152 }
10153 in += info.inJump;
10154 out += info.outJump;
10155 }
10156 }
10157 }
10158 }
10159
10160 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10161 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10162 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10163
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10164 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10165 {
10166 char val;
10167 char *ptr;
10168
10169 ptr = buffer;
10170 if ( format == RTAUDIO_SINT16 ) {
10171 for ( unsigned int i=0; i<samples; i++ ) {
10172 // Swap 1st and 2nd bytes.
10173 val = *(ptr);
10174 *(ptr) = *(ptr+1);
10175 *(ptr+1) = val;
10176
10177 // Increment 2 bytes.
10178 ptr += 2;
10179 }
10180 }
10181 else if ( format == RTAUDIO_SINT32 ||
10182 format == RTAUDIO_FLOAT32 ) {
10183 for ( unsigned int i=0; i<samples; i++ ) {
10184 // Swap 1st and 4th bytes.
10185 val = *(ptr);
10186 *(ptr) = *(ptr+3);
10187 *(ptr+3) = val;
10188
10189 // Swap 2nd and 3rd bytes.
10190 ptr += 1;
10191 val = *(ptr);
10192 *(ptr) = *(ptr+1);
10193 *(ptr+1) = val;
10194
10195 // Increment 3 more bytes.
10196 ptr += 3;
10197 }
10198 }
10199 else if ( format == RTAUDIO_SINT24 ) {
10200 for ( unsigned int i=0; i<samples; i++ ) {
10201 // Swap 1st and 3rd bytes.
10202 val = *(ptr);
10203 *(ptr) = *(ptr+2);
10204 *(ptr+2) = val;
10205
10206 // Increment 2 more bytes.
10207 ptr += 2;
10208 }
10209 }
10210 else if ( format == RTAUDIO_FLOAT64 ) {
10211 for ( unsigned int i=0; i<samples; i++ ) {
10212 // Swap 1st and 8th bytes
10213 val = *(ptr);
10214 *(ptr) = *(ptr+7);
10215 *(ptr+7) = val;
10216
10217 // Swap 2nd and 7th bytes
10218 ptr += 1;
10219 val = *(ptr);
10220 *(ptr) = *(ptr+5);
10221 *(ptr+5) = val;
10222
10223 // Swap 3rd and 6th bytes
10224 ptr += 1;
10225 val = *(ptr);
10226 *(ptr) = *(ptr+3);
10227 *(ptr+3) = val;
10228
10229 // Swap 4th and 5th bytes
10230 ptr += 1;
10231 val = *(ptr);
10232 *(ptr) = *(ptr+1);
10233 *(ptr+1) = val;
10234
10235 // Increment 5 more bytes.
10236 ptr += 5;
10237 }
10238 }
10239 }
10240
10241 // Indentation settings for Vim and Emacs
10242 //
10243 // Local Variables:
10244 // c-basic-offset: 2
10245 // indent-tabs-mode: nil
10246 // End:
10247 //
10248 // vim: et sts=2 sw=2
10249
10250 #endif // RTAUDIO_ENABLED -GODOT-
10251