1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2016 Gary P. Scavone
14
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
22
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
25
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
30
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39 /************************************************************************/
40
41 // RtAudio: Version 4.1.2
42
43 #include "RtAudio.h"
44 #include <iostream>
45 #include <cstdlib>
46 #include <cstring>
47 #include <climits>
48 #include <algorithm>
49
// Static variable definitions.
// Canonical list of sample rates probed when a device reports a continuous
// range of supported rates (used by the API-specific getDeviceInfo methods).
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
56
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)

// Windows builds: map the portable mutex macros onto Win32 critical sections.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

// Needed for TCHAR handling in the Windows string-conversion helpers below.
#include "tchar.h"
64
// Convert a NUL-terminated narrow C string into a std::string (plain copy).
static std::string convertCharPointerToStdString( const char *text )
{
  std::string result( text );
  return result;
}
69
convertCharPointerToStdString(const wchar_t * text)70 static std::string convertCharPointerToStdString(const wchar_t *text)
71 {
72 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
73 std::string s( length-1, '\0' );
74 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
75 return s;
76 }
77
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
// No real audio API selected (dummy build): provide harmless placeholders so
// the base-class constructor/destructor still compile.  NOTE(review):
// MUTEX_LOCK/MUTEX_UNLOCK are intentionally left undefined here — only the
// dummy backend (which does not lock) is usable in this configuration.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
88
89 // *************************************************** //
90 //
91 // RtAudio definitions.
92 //
93 // *************************************************** //
94
getVersion(void)95 std::string RtAudio :: getVersion( void ) throw()
96 {
97 return RTAUDIO_VERSION;
98 }
99
// Fill 'apis' with the set of audio APIs compiled into this build.
// The vector is cleared first; an empty result means only a misconfigured
// build (not even the dummy API) is present.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
134
// Replace the current API instance (if any) with a freshly constructed
// backend for 'api'.  If this build has no compiled support for 'api',
// rtapi_ is left null — callers must check rtapi_ afterwards.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0; // stays null unless one of the branches below matches

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
178
RtAudio(RtAudio::Api api)179 RtAudio :: RtAudio( RtAudio::Api api )
180 {
181 rtapi_ = 0;
182
183 if ( api != UNSPECIFIED ) {
184 // Attempt to open the specified API.
185 openRtApi( api );
186 if ( rtapi_ ) return;
187
188 // No compiled support for specified API value. Issue a debug
189 // warning and continue as if no API was specified.
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
191 }
192
193 // Iterate through the compiled APIs and return as soon as we find
194 // one with at least one device or we reach the end of the list.
195 std::vector< RtAudio::Api > apis;
196 getCompiledApi( apis );
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
198 openRtApi( apis[i] );
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
200 }
201
202 if ( rtapi_ ) return;
203
204 // It should not be possible to get here because the preprocessor
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
206 // API-specific definitions are passed to the compiler. But just in
207 // case something weird happens, we'll throw an error.
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
210 }
211
~RtAudio()212 RtAudio :: ~RtAudio() throw()
213 {
214 if ( rtapi_ )
215 delete rtapi_;
216 }
217
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioErrorCallback errorCallback)218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
219 RtAudio::StreamParameters *inputParameters,
220 RtAudioFormat format, unsigned int sampleRate,
221 unsigned int *bufferFrames,
222 RtAudioCallback callback, void *userData,
223 RtAudio::StreamOptions *options,
224 RtAudioErrorCallback errorCallback )
225 {
226 return rtapi_->openStream( outputParameters, inputParameters, format,
227 sampleRate, bufferFrames, callback,
228 userData, options, errorCallback );
229 }
230
231 // *************************************************** //
232 //
233 // Public RtApi definitions (see end of file for
234 // private or protected utility functions).
235 //
236 // *************************************************** //
237
// Base-class constructor: put the stream bookkeeping into a fully closed,
// uninitialized state.  Subclasses fill in the rest of stream_ when a
// stream is actually opened.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0; // user buffers allocated on stream open
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;       // warnings printed by default
  firstErrorOccurred_ = false;
}
249
// Base-class destructor: release the stream mutex created in the constructor.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
254
// Open a stream after validating every user-supplied parameter.  On any
// validation or probe failure, error() is invoked with an appropriate
// message and the stream is left closed.  On success the stream state
// becomes STREAM_STOPPED and *bufferFrames reflects the buffer size the
// backend actually chose.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() is used here as a validity check: it yields 0 for any
  // value that is not a recognized RtAudioFormat.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Validate the requested device indices against the live device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Probe/open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo a successful output open so we don't leak a half-open stream.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callback hooks for the API-specific callback machinery.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the buffer count the backend actually selected.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
347
getDefaultInputDevice(void)348 unsigned int RtApi :: getDefaultInputDevice( void )
349 {
350 // Should be implemented in subclasses if possible.
351 return 0;
352 }
353
getDefaultOutputDevice(void)354 unsigned int RtApi :: getDefaultOutputDevice( void )
355 {
356 // Should be implemented in subclasses if possible.
357 return 0;
358 }
359
closeStream(void)360 void RtApi :: closeStream( void )
361 {
362 // MUST be implemented in subclasses!
363 return;
364 }
365
// Base-class stub for the device probe/open step; every concrete API MUST
// override this.  Always reports failure here.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
374
tickStreamTime(void)375 void RtApi :: tickStreamTime( void )
376 {
377 // Subclasses that do not provide their own implementation of
378 // getStreamTime should call this function once per buffer I/O to
379 // provide basic stream time support.
380
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
382
383 #if defined( HAVE_GETTIMEOFDAY )
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
385 #endif
386 }
387
getStreamLatency(void)388 long RtApi :: getStreamLatency( void )
389 {
390 verifyStream();
391
392 long totalLatency = 0;
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
394 totalLatency = stream_.latency[0];
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
396 totalLatency += stream_.latency[1];
397
398 return totalLatency;
399 }
400
// Return the current stream time in seconds.  When gettimeofday() is
// available, the time elapsed since the last buffer tick is added to the
// recorded stream time for a finer-grained estimate.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Nothing to interpolate unless the stream is running and has ticked
  // at least once.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
423
setStreamTime(double time)424 void RtApi :: setStreamTime( double time )
425 {
426 verifyStream();
427
428 if ( time >= 0.0 )
429 stream_.streamTime = time;
430 }
431
getStreamSampleRate(void)432 unsigned int RtApi :: getStreamSampleRate( void )
433 {
434 verifyStream();
435
436 return stream_.sampleRate;
437 }
438
439
440 // *************************************************** //
441 //
442 // OS/API-specific methods.
443 //
444 // *************************************************** //
445
446 #if defined(__MACOSX_CORE__)
447
448 // The OS X CoreAudio API is designed to use a separate callback
449 // procedure for each of its audio devices. A single RtAudio duplex
450 // stream using two different devices is supported here, though it
451 // cannot be guaranteed to always behave correctly because we cannot
452 // synchronize these two callbacks.
453 //
454 // A property listener is installed for over/underrun information.
455 // However, no functionality is currently provided to allow property
456 // listeners to trigger user handlers because it is unclear what could
457 // be done if a critical stream parameter (buffer size, sample rate,
458 // device disconnect) notification arrived. The listeners entail
459 // quite a bit of extra code and most likely, a user program wouldn't
460 // be prepared for the result anyway. However, we do provide a flag
461 // to the client callback function to inform of an over/underrun.
462
463 // A structure to hold various information related to the CoreAudio API
464 // implementation.
// A structure to hold various information related to the CoreAudio API
// implementation.  Throughout, index [0] refers to the output device/side
// and index [1] to the input side (see xrunListener below).
struct CoreHandle {
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2]; // IOProc identifiers (10.5+ API)
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // over/underrun flags set by the property listener
  char *deviceBuffer; // intermediate buffer — presumably allocated at stream open; not visible in this chunk
  pthread_cond_t condition; // used to coordinate with the callback thread — TODO confirm against stop/close code
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
481
// Constructor: on 10.6+, configure CoreAudio's run loop property so device
// property queries/notifications behave correctly.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  // NOTE(review): the run loop is deliberately NULL here — confirm against
  // Apple's guidance on kAudioHardwarePropertyRunLoop.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
500
// Destructor: shut down any open stream while the derived class is still
// fully constructed.
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
508
getDeviceCount(void)509 unsigned int RtApiCore :: getDeviceCount( void )
510 {
511 // Find out how many audio devices there are, if any.
512 UInt32 dataSize;
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
515 if ( result != noErr ) {
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
517 error( RtAudioError::WARNING );
518 return 0;
519 }
520
521 return dataSize / sizeof( AudioDeviceID );
522 }
523
getDefaultInputDevice(void)524 unsigned int RtApiCore :: getDefaultInputDevice( void )
525 {
526 unsigned int nDevices = getDeviceCount();
527 if ( nDevices <= 1 ) return 0;
528
529 AudioDeviceID id;
530 UInt32 dataSize = sizeof( AudioDeviceID );
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
533 if ( result != noErr ) {
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
535 error( RtAudioError::WARNING );
536 return 0;
537 }
538
539 dataSize *= nDevices;
540 AudioDeviceID deviceList[ nDevices ];
541 property.mSelector = kAudioHardwarePropertyDevices;
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
543 if ( result != noErr ) {
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
545 error( RtAudioError::WARNING );
546 return 0;
547 }
548
549 for ( unsigned int i=0; i<nDevices; i++ )
550 if ( id == deviceList[i] ) return i;
551
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
553 error( RtAudioError::WARNING );
554 return 0;
555 }
556
getDefaultOutputDevice(void)557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
558 {
559 unsigned int nDevices = getDeviceCount();
560 if ( nDevices <= 1 ) return 0;
561
562 AudioDeviceID id;
563 UInt32 dataSize = sizeof( AudioDeviceID );
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
566 if ( result != noErr ) {
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
568 error( RtAudioError::WARNING );
569 return 0;
570 }
571
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
573 AudioDeviceID deviceList[ nDevices ];
574 property.mSelector = kAudioHardwarePropertyDevices;
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
576 if ( result != noErr ) {
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
578 error( RtAudioError::WARNING );
579 return 0;
580 }
581
582 for ( unsigned int i=0; i<nDevices; i++ )
583 if ( id == deviceList[i] ) return i;
584
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
586 error( RtAudioError::WARNING );
587 return 0;
588 }
589
// Probe one CoreAudio device (by RtAudio index) and fill a DeviceInfo:
// name ("manufacturer: device"), input/output/duplex channel counts,
// supported sample rates, preferred rate and native format.  info.probed
// stays false if any query along the way fails (a WARNING is issued).
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Translate the RtAudio device index into a CoreAudio AudioDeviceID by
  // fetching the full hardware device list.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst-case conversion buffer: 3 bytes per UTF-16 code unit covers any
  // UTF-8 output (surrogate pairs use two units), plus one byte for the NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  // Append the device name after "manufacturer: ".
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".  The property size is queried
  // first so a correctly sized AudioBufferList can be allocated.
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum the channels of every stream.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration" (same procedure, input scope).
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest discrete rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      // Narrow to the intersection of all reported ranges (most conservative).
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  if ( haveValueRange ) {
    // Expand the continuous range into the canonical SAMPLE_RATES list.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
830
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)831 static OSStatus callbackHandler( AudioDeviceID inDevice,
832 const AudioTimeStamp* /*inNow*/,
833 const AudioBufferList* inInputData,
834 const AudioTimeStamp* /*inInputTime*/,
835 AudioBufferList* outOutputData,
836 const AudioTimeStamp* /*inOutputTime*/,
837 void* infoPointer )
838 {
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
840
841 RtApiCore *object = (RtApiCore *) info->object;
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
843 return kAudioHardwareUnspecifiedError;
844 else
845 return kAudioHardwareNoError;
846 }
847
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
849 UInt32 nAddresses,
850 const AudioObjectPropertyAddress properties[],
851 void* handlePointer )
852 {
853 CoreHandle *handle = (CoreHandle *) handlePointer;
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
857 handle->xrun[1] = true;
858 else
859 handle->xrun[0] = true;
860 }
861 }
862
863 return kAudioHardwareNoError;
864 }
865
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)866 static OSStatus rateListener( AudioObjectID inDevice,
867 UInt32 /*nAddresses*/,
868 const AudioObjectPropertyAddress /*properties*/[],
869 void* ratePointer )
870 {
871 Float64 *rate = (Float64 *) ratePointer;
872 UInt32 dataSize = sizeof( Float64 );
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
874 kAudioObjectPropertyScopeGlobal,
875 kAudioObjectPropertyElementMaster };
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
877 return kAudioHardwareNoError;
878 }
879
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
881 unsigned int firstChannel, unsigned int sampleRate,
882 RtAudioFormat format, unsigned int *bufferSize,
883 RtAudio::StreamOptions *options )
884 {
885 // Get device ID
886 unsigned int nDevices = getDeviceCount();
887 if ( nDevices == 0 ) {
888 // This should not happen because a check is made before this function is called.
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
890 return FAILURE;
891 }
892
893 if ( device >= nDevices ) {
894 // This should not happen because a check is made before this function is called.
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
896 return FAILURE;
897 }
898
899 AudioDeviceID deviceList[ nDevices ];
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
902 kAudioObjectPropertyScopeGlobal,
903 kAudioObjectPropertyElementMaster };
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
905 0, NULL, &dataSize, (void *) &deviceList );
906 if ( result != noErr ) {
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
908 return FAILURE;
909 }
910
911 AudioDeviceID id = deviceList[ device ];
912
913 // Setup for stream mode.
914 bool isInput = false;
915 if ( mode == INPUT ) {
916 isInput = true;
917 property.mScope = kAudioDevicePropertyScopeInput;
918 }
919 else
920 property.mScope = kAudioDevicePropertyScopeOutput;
921
922 // Get the stream "configuration".
923 AudioBufferList *bufferList = nil;
924 dataSize = 0;
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
927 if ( result != noErr || dataSize == 0 ) {
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
929 errorText_ = errorStream_.str();
930 return FAILURE;
931 }
932
933 // Allocate the AudioBufferList.
934 bufferList = (AudioBufferList *) malloc( dataSize );
935 if ( bufferList == NULL ) {
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
937 return FAILURE;
938 }
939
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
941 if (result != noErr || dataSize == 0) {
942 free( bufferList );
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
944 errorText_ = errorStream_.str();
945 return FAILURE;
946 }
947
948 // Search for one or more streams that contain the desired number of
949 // channels. CoreAudio devices can have an arbitrary number of
950 // streams and each stream can have an arbitrary number of channels.
951 // For each stream, a single buffer of interleaved samples is
952 // provided. RtAudio prefers the use of one stream of interleaved
953 // data or multiple consecutive single-channel streams. However, we
954 // now support multiple consecutive multi-channel streams of
955 // interleaved data as well.
956 UInt32 iStream, offsetCounter = firstChannel;
957 UInt32 nStreams = bufferList->mNumberBuffers;
958 bool monoMode = false;
959 bool foundStream = false;
960
961 // First check that the device supports the requested number of
962 // channels.
963 UInt32 deviceChannels = 0;
964 for ( iStream=0; iStream<nStreams; iStream++ )
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
966
967 if ( deviceChannels < ( channels + firstChannel ) ) {
968 free( bufferList );
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
970 errorText_ = errorStream_.str();
971 return FAILURE;
972 }
973
974 // Look for a single stream meeting our needs.
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
978 if ( streamChannels >= channels + offsetCounter ) {
979 firstStream = iStream;
980 channelOffset = offsetCounter;
981 foundStream = true;
982 break;
983 }
984 if ( streamChannels > offsetCounter ) break;
985 offsetCounter -= streamChannels;
986 }
987
988 // If we didn't find a single stream above, then we should be able
989 // to meet the channel specification with multiple streams.
990 if ( foundStream == false ) {
991 monoMode = true;
992 offsetCounter = firstChannel;
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
995 if ( streamChannels > offsetCounter ) break;
996 offsetCounter -= streamChannels;
997 }
998
999 firstStream = iStream;
1000 channelOffset = offsetCounter;
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
1002
1003 if ( streamChannels > 1 ) monoMode = false;
1004 while ( channelCounter > 0 ) {
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1006 if ( streamChannels > 1 ) monoMode = false;
1007 channelCounter -= streamChannels;
1008 streamCount++;
1009 }
1010 }
1011
1012 free( bufferList );
1013
1014 // Determine the buffer size.
1015 AudioValueRange bufferRange;
1016 dataSize = sizeof( AudioValueRange );
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1019
1020 if ( result != noErr ) {
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1022 errorText_ = errorStream_.str();
1023 return FAILURE;
1024 }
1025
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1029
1030 // Set the buffer size. For multiple streams, I'm assuming we only
1031 // need to make this setting for the master channel.
1032 UInt32 theSize = (UInt32) *bufferSize;
1033 dataSize = sizeof( UInt32 );
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1036
1037 if ( result != noErr ) {
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1039 errorText_ = errorStream_.str();
1040 return FAILURE;
1041 }
1042
1043 // If attempting to setup a duplex stream, the bufferSize parameter
1044 // MUST be the same in both directions!
1045 *bufferSize = theSize;
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1048 errorText_ = errorStream_.str();
1049 return FAILURE;
1050 }
1051
1052 stream_.bufferSize = *bufferSize;
1053 stream_.nBuffers = 1;
1054
1055 // Try to set "hog" mode ... it's not clear to me this is working.
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1057 pid_t hog_pid;
1058 dataSize = sizeof( hog_pid );
1059 property.mSelector = kAudioDevicePropertyHogMode;
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1061 if ( result != noErr ) {
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1063 errorText_ = errorStream_.str();
1064 return FAILURE;
1065 }
1066
1067 if ( hog_pid != getpid() ) {
1068 hog_pid = getpid();
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1070 if ( result != noErr ) {
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1072 errorText_ = errorStream_.str();
1073 return FAILURE;
1074 }
1075 }
1076 }
1077
1078 // Check and if necessary, change the sample rate for the device.
1079 Float64 nominalRate;
1080 dataSize = sizeof( Float64 );
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1083 if ( result != noErr ) {
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1085 errorText_ = errorStream_.str();
1086 return FAILURE;
1087 }
1088
1089 // Only change the sample rate if off by more than 1 Hz.
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1091
1092 // Set a property listener for the sample rate change
1093 Float64 reportedRate = 0.0;
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1096 if ( result != noErr ) {
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1098 errorText_ = errorStream_.str();
1099 return FAILURE;
1100 }
1101
1102 nominalRate = (Float64) sampleRate;
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1104 if ( result != noErr ) {
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1107 errorText_ = errorStream_.str();
1108 return FAILURE;
1109 }
1110
1111 // Now wait until the reported nominal rate is what we just set.
1112 UInt32 microCounter = 0;
1113 while ( reportedRate != nominalRate ) {
1114 microCounter += 5000;
1115 if ( microCounter > 5000000 ) break;
1116 usleep( 5000 );
1117 }
1118
1119 // Remove the property listener.
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1121
1122 if ( microCounter > 5000000 ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1124 errorText_ = errorStream_.str();
1125 return FAILURE;
1126 }
1127 }
1128
1129 // Now set the stream format for all streams. Also, check the
1130 // physical format of the device and change that if necessary.
1131 AudioStreamBasicDescription description;
1132 dataSize = sizeof( AudioStreamBasicDescription );
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1135 if ( result != noErr ) {
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1137 errorText_ = errorStream_.str();
1138 return FAILURE;
1139 }
1140
1141 // Set the sample rate and data format id. However, only make the
1142 // change if the sample rate is not within 1.0 of the desired
1143 // rate and the format is not linear pcm.
1144 bool updateFormat = false;
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1146 description.mSampleRate = (Float64) sampleRate;
1147 updateFormat = true;
1148 }
1149
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1151 description.mFormatID = kAudioFormatLinearPCM;
1152 updateFormat = true;
1153 }
1154
1155 if ( updateFormat ) {
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1160 return FAILURE;
1161 }
1162 }
1163
1164 // Now check the physical format.
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1170 return FAILURE;
1171 }
1172
1173 //std::cout << "Current physical stream format:" << std::endl;
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1178
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1180 description.mFormatID = kAudioFormatLinearPCM;
1181 //description.mSampleRate = (Float64) sampleRate;
1182 AudioStreamBasicDescription testDescription = description;
1183 UInt32 formatFlags;
1184
1185 // We'll try higher bit rates first and then work our way down.
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1199
1200 bool setPhysicalFormat = false;
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1202 testDescription = description;
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1204 testDescription.mFormatFlags = physicalFormats[i].second;
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1207 else
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1211 if ( result == noErr ) {
1212 setPhysicalFormat = true;
1213 //std::cout << "Updated physical stream format:" << std::endl;
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1218 break;
1219 }
1220 }
1221
1222 if ( !setPhysicalFormat ) {
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1224 errorText_ = errorStream_.str();
1225 return FAILURE;
1226 }
1227 } // done setting virtual/physical formats.
1228
1229 // Get the stream / device latency.
1230 UInt32 latency;
1231 dataSize = sizeof( UInt32 );
1232 property.mSelector = kAudioDevicePropertyLatency;
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1236 else {
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1238 errorText_ = errorStream_.str();
1239 error( RtAudioError::WARNING );
1240 }
1241 }
1242
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
1244 // always be presented in native-endian format, so we should never
1245 // need to byte swap.
1246 stream_.doByteSwap[mode] = false;
1247
1248 // From the CoreAudio documentation, PCM data must be supplied as
1249 // 32-bit floats.
1250 stream_.userFormat = format;
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1252
1253 if ( streamCount == 1 )
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1255 else // multiple streams
1256 stream_.nDeviceChannels[mode] = channels;
1257 stream_.nUserChannels[mode] = channels;
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1260 else stream_.userInterleaved = true;
1261 stream_.deviceInterleaved[mode] = true;
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1263
1264 // Set flags for buffer conversion.
1265 stream_.doConvertBuffer[mode] = false;
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1267 stream_.doConvertBuffer[mode] = true;
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1269 stream_.doConvertBuffer[mode] = true;
1270 if ( streamCount == 1 ) {
1271 if ( stream_.nUserChannels[mode] > 1 &&
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 }
1275 else if ( monoMode && stream_.userInterleaved )
1276 stream_.doConvertBuffer[mode] = true;
1277
1278 // Allocate our CoreHandle structure for the stream.
1279 CoreHandle *handle = 0;
1280 if ( stream_.apiHandle == 0 ) {
1281 try {
1282 handle = new CoreHandle;
1283 }
1284 catch ( std::bad_alloc& ) {
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1286 goto error;
1287 }
1288
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1291 goto error;
1292 }
1293 stream_.apiHandle = (void *) handle;
1294 }
1295 else
1296 handle = (CoreHandle *) stream_.apiHandle;
1297 handle->iStream[mode] = firstStream;
1298 handle->nStreams[mode] = streamCount;
1299 handle->id[mode] = id;
1300
1301 // Allocate necessary internal buffers.
1302 unsigned long bufferBytes;
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1307 if ( stream_.userBuffer[mode] == NULL ) {
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1309 goto error;
1310 }
1311
1312 // If possible, we will make use of the CoreAudio stream buffers as
1313 // "device buffers". However, we can't do this if using multiple
1314 // streams.
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1316
1317 bool makeBuffer = true;
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1319 if ( mode == INPUT ) {
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1323 }
1324 }
1325
1326 if ( makeBuffer ) {
1327 bufferBytes *= *bufferSize;
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1330 if ( stream_.deviceBuffer == NULL ) {
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1332 goto error;
1333 }
1334 }
1335 }
1336
1337 stream_.sampleRate = sampleRate;
1338 stream_.device[mode] = device;
1339 stream_.state = STREAM_STOPPED;
1340 stream_.callbackInfo.object = (void *) this;
1341
1342 // Setup the buffer conversion information structure.
1343 if ( stream_.doConvertBuffer[mode] ) {
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1345 else setConvertInfo( mode, channelOffset );
1346 }
1347
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1349 // Only one callback procedure per device.
1350 stream_.mode = DUPLEX;
1351 else {
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1354 #else
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1357 #endif
1358 if ( result != noErr ) {
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1360 errorText_ = errorStream_.str();
1361 goto error;
1362 }
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
1364 stream_.mode = DUPLEX;
1365 else
1366 stream_.mode = mode;
1367 }
1368
1369 // Setup the device property listener for over/underload.
1370 property.mSelector = kAudioDeviceProcessorOverload;
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1373
1374 return SUCCESS;
1375
1376 error:
1377 if ( handle ) {
1378 pthread_cond_destroy( &handle->condition );
1379 delete handle;
1380 stream_.apiHandle = 0;
1381 }
1382
1383 for ( int i=0; i<2; i++ ) {
1384 if ( stream_.userBuffer[i] ) {
1385 free( stream_.userBuffer[i] );
1386 stream_.userBuffer[i] = 0;
1387 }
1388 }
1389
1390 if ( stream_.deviceBuffer ) {
1391 free( stream_.deviceBuffer );
1392 stream_.deviceBuffer = 0;
1393 }
1394
1395 stream_.state = STREAM_CLOSED;
1396 return FAILURE;
1397 }
1398
closeStream(void)1399 void RtApiCore :: closeStream( void )
1400 {
1401 if ( stream_.state == STREAM_CLOSED ) {
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1403 error( RtAudioError::WARNING );
1404 return;
1405 }
1406
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1409 if (handle) {
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1411 kAudioObjectPropertyScopeGlobal,
1412 kAudioObjectPropertyElementMaster };
1413
1414 property.mSelector = kAudioDeviceProcessorOverload;
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1418 error( RtAudioError::WARNING );
1419 }
1420 }
1421 if ( stream_.state == STREAM_RUNNING )
1422 AudioDeviceStop( handle->id[0], callbackHandler );
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1425 #else
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1428 #endif
1429 }
1430
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1432 if (handle) {
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1434 kAudioObjectPropertyScopeGlobal,
1435 kAudioObjectPropertyElementMaster };
1436
1437 property.mSelector = kAudioDeviceProcessorOverload;
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1441 error( RtAudioError::WARNING );
1442 }
1443 }
1444 if ( stream_.state == STREAM_RUNNING )
1445 AudioDeviceStop( handle->id[1], callbackHandler );
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1448 #else
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1451 #endif
1452 }
1453
1454 for ( int i=0; i<2; i++ ) {
1455 if ( stream_.userBuffer[i] ) {
1456 free( stream_.userBuffer[i] );
1457 stream_.userBuffer[i] = 0;
1458 }
1459 }
1460
1461 if ( stream_.deviceBuffer ) {
1462 free( stream_.deviceBuffer );
1463 stream_.deviceBuffer = 0;
1464 }
1465
1466 // Destroy pthread condition variable.
1467 pthread_cond_destroy( &handle->condition );
1468 delete handle;
1469 stream_.apiHandle = 0;
1470
1471 stream_.mode = UNINITIALIZED;
1472 stream_.state = STREAM_CLOSED;
1473 }
1474
startStream(void)1475 void RtApiCore :: startStream( void )
1476 {
1477 verifyStream();
1478 if ( stream_.state == STREAM_RUNNING ) {
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1480 error( RtAudioError::WARNING );
1481 return;
1482 }
1483
1484 OSStatus result = noErr;
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1487
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
1489 if ( result != noErr ) {
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1491 errorText_ = errorStream_.str();
1492 goto unlock;
1493 }
1494 }
1495
1496 if ( stream_.mode == INPUT ||
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1498
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
1500 if ( result != noErr ) {
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1502 errorText_ = errorStream_.str();
1503 goto unlock;
1504 }
1505 }
1506
1507 handle->drainCounter = 0;
1508 handle->internalDrain = false;
1509 stream_.state = STREAM_RUNNING;
1510
1511 unlock:
1512 if ( result == noErr ) return;
1513 error( RtAudioError::SYSTEM_ERROR );
1514 }
1515
// Stop the stream after allowing the output to drain.  If the drain
// counter is untouched, sets it and blocks on the handle's condition
// variable until callbackEvent() signals that draining has finished,
// then stops the output (and, for a split duplex, input) device.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      // drainCounter == 2 tells callbackEvent() to write silence and,
      // once done, signal this condition variable.
      // NOTE(review): stream_.mutex is not locked before this wait;
      // POSIX requires the mutex to be held when calling
      // pthread_cond_wait — confirm whether this is intentional here.
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // The input device is stopped separately only when it differs from
  // the output device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1558
abortStream(void)1559 void RtApiCore :: abortStream( void )
1560 {
1561 verifyStream();
1562 if ( stream_.state == STREAM_STOPPED ) {
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1564 error( RtAudioError::WARNING );
1565 return;
1566 }
1567
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1569 handle->drainCounter = 2;
1570
1571 stopStream();
1572 }
1573
1574 // This function will be called by a spawned thread when the user
1575 // callback function signals that the stream should be stopped or
1576 // aborted. It is better to handle it this way because the
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
1578 // function is called.
coreStopStream(void * ptr)1579 static void *coreStopStream( void *ptr )
1580 {
1581 CallbackInfo *info = (CallbackInfo *) ptr;
1582 RtApiCore *object = (RtApiCore *) info->object;
1583
1584 object->stopStream();
1585 pthread_exit( NULL );
1586 }
1587
// Per-buffer worker invoked from the CoreAudio HAL I/O thread for each
// device in the stream.  Fires the user callback (for duplex streams
// split across two devices, only when called for the output device),
// then converts/copies audio between the user buffers and the CoreAudio
// AudioBufferLists.  Returns SUCCESS unless the stream is unexpectedly
// closed.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Stop was requested by the user callback: stopStream() must run
      // on a separate thread (see coreStopStream above).
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort (no drain).
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a drain-then-stop: start counting callbacks.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE: this inner frame index intentionally shadows the
          // outer per-stream index `i`.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // Declared separately from its assignment because the goto above
  // would otherwise jump over an initialization.
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // NOTE: this inner frame index intentionally shadows the
          // outer per-stream index `i`.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  return SUCCESS;
}
1847
getErrorCode(OSStatus code)1848 const char* RtApiCore :: getErrorCode( OSStatus code )
1849 {
1850 switch( code ) {
1851
1852 case kAudioHardwareNotRunningError:
1853 return "kAudioHardwareNotRunningError";
1854
1855 case kAudioHardwareUnspecifiedError:
1856 return "kAudioHardwareUnspecifiedError";
1857
1858 case kAudioHardwareUnknownPropertyError:
1859 return "kAudioHardwareUnknownPropertyError";
1860
1861 case kAudioHardwareBadPropertySizeError:
1862 return "kAudioHardwareBadPropertySizeError";
1863
1864 case kAudioHardwareIllegalOperationError:
1865 return "kAudioHardwareIllegalOperationError";
1866
1867 case kAudioHardwareBadObjectError:
1868 return "kAudioHardwareBadObjectError";
1869
1870 case kAudioHardwareBadDeviceError:
1871 return "kAudioHardwareBadDeviceError";
1872
1873 case kAudioHardwareBadStreamError:
1874 return "kAudioHardwareBadStreamError";
1875
1876 case kAudioHardwareUnsupportedOperationError:
1877 return "kAudioHardwareUnsupportedOperationError";
1878
1879 case kAudioDeviceUnsupportedFormatError:
1880 return "kAudioDeviceUnsupportedFormatError";
1881
1882 case kAudioDevicePermissionsError:
1883 return "kAudioDevicePermissionsError";
1884
1885 default:
1886 return "CoreAudio unknown error";
1887 }
1888 }
1889
1890 //******************** End of __MACOSX_CORE__ *********************//
1891 #endif
1892
1893 #if defined(__UNIX_JACK__)
1894
1895 // JACK is a low-latency audio server, originally written for the
1896 // GNU/Linux operating system and now also ported to OS-X. It can
1897 // connect a number of different applications to an audio device, as
1898 // well as allowing them to share audio between themselves.
1899 //
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
1901 // have ports connected to the server. The JACK server is typically
1902 // started in a terminal as follows:
1903 //
1904 // .jackd -d alsa -d hw:0
1905 //
1906 // or through an interface program such as qjackctl. Many of the
1907 // parameters normally set for a stream are fixed by the JACK server
1908 // and can be specified when the JACK server is started. In
1909 // particular,
1910 //
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1912 //
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1914 // frames, and number of buffers = 4. Once the server is running, it
1915 // is not possible to override these values. If the values are not
1916 // specified in the command-line, the JACK server uses default values.
1917 //
1918 // The JACK server does not have to be running when an instance of
1919 // RtApiJack is created, though the function getDeviceCount() will
1920 // report 0 devices found until JACK has been started. When no
1921 // devices are available (i.e., the JACK server is not running), a
1922 // stream cannot be opened.
1923
1924 #include <jack/jack.h>
1925 #include <unistd.h>
1926 #include <cstdio>
1927
1928 // A structure to hold various information related to the Jack API
1929 // implementation.
1930 struct JackHandle {
1931 jack_client_t *client;
1932 jack_port_t **ports[2];
1933 std::string deviceName[2];
1934 bool xrun[2];
1935 pthread_cond_t condition;
1936 int drainCounter; // Tracks callback counts when draining
1937 bool internalDrain; // Indicates if stop is initiated from callback or not.
1938
JackHandleJackHandle1939 JackHandle()
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1941 };
1942
// No-op error handler installed via jack_set_error_function() in the
// RtApiJack constructor to suppress JACK's default error reporting.
static void jackSilentError( const char * ) {};
1944
// Constructor: no stream state to set up; only silences JACK's
// internal error output in non-debug builds.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
1953
~RtApiJack()1954 RtApiJack :: ~RtApiJack()
1955 {
1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
1957 }
1958
getDeviceCount(void)1959 unsigned int RtApiJack :: getDeviceCount( void )
1960 {
1961 // See if we can become a jack client.
1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1963 jack_status_t *status = NULL;
1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1965 if ( client == 0 ) return 0;
1966
1967 const char **ports;
1968 std::string port, previousPort;
1969 unsigned int nChannels = 0, nDevices = 0;
1970 ports = jack_get_ports( client, NULL, NULL, 0 );
1971 if ( ports ) {
1972 // Parse the port names up to the first colon (:).
1973 size_t iColon = 0;
1974 do {
1975 port = (char *) ports[ nChannels ];
1976 iColon = port.find(":");
1977 if ( iColon != std::string::npos ) {
1978 port = port.substr( 0, iColon + 1 );
1979 if ( port != previousPort ) {
1980 nDevices++;
1981 previousPort = port;
1982 }
1983 }
1984 } while ( ports[++nChannels] );
1985 free( ports );
1986 }
1987
1988 jack_client_close( client );
1989 return nDevices;
1990 }
1991
// Fill a DeviceInfo structure for the given device index.  "Devices"
// are the distinct port-name prefixes (text before the first colon) of
// all ports registered with the JACK server.  info.probed stays false
// on any failure (no server, bad index, or no usable channels).
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:), recording the
    // prefix of the requested device index as its name.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate (the only rate supported).
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2084
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2086 {
2087 CallbackInfo *info = (CallbackInfo *) infoPointer;
2088
2089 RtApiJack *object = (RtApiJack *) info->object;
2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2091
2092 return 0;
2093 }
2094
2095 // This function will be called by a spawned thread when the Jack
2096 // server signals that it is shutting down. It is necessary to handle
2097 // it this way because the jackShutdown() function must return before
2098 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2099 static void *jackCloseStream( void *ptr )
2100 {
2101 CallbackInfo *info = (CallbackInfo *) ptr;
2102 RtApiJack *object = (RtApiJack *) info->object;
2103
2104 object->closeStream();
2105
2106 pthread_exit( NULL );
2107 }
// Shutdown callback registered with the JACK server via
// jack_on_shutdown().
static void jackShutdown( void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;

  // Check current stream state.  If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;

  // Close the stream from a spawned thread: closeStream() cannot
  // complete until this shutdown callback has returned.
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}
2124
jackXrun(void * infoPointer)2125 static int jackXrun( void *infoPointer )
2126 {
2127 JackHandle *handle = (JackHandle *) infoPointer;
2128
2129 if ( handle->ports[0] ) handle->xrun[0] = true;
2130 if ( handle->ports[1] ) handle->xrun[1] = true;
2131
2132 return 0;
2133 }
2134
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2136 unsigned int firstChannel, unsigned int sampleRate,
2137 RtAudioFormat format, unsigned int *bufferSize,
2138 RtAudio::StreamOptions *options )
2139 {
2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2141
2142 // Look for jack server and try to become a client (only do once per stream).
2143 jack_client_t *client = 0;
2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2146 jack_status_t *status = NULL;
2147 if ( options && !options->streamName.empty() )
2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2149 else
2150 client = jack_client_open( "RtApiJack", jackoptions, status );
2151 if ( client == 0 ) {
2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2153 error( RtAudioError::WARNING );
2154 return FAILURE;
2155 }
2156 }
2157 else {
2158 // The handle must have been created on an earlier pass.
2159 client = handle->client;
2160 }
2161
2162 const char **ports;
2163 std::string port, previousPort, deviceName;
2164 unsigned int nPorts = 0, nDevices = 0;
2165 ports = jack_get_ports( client, NULL, NULL, 0 );
2166 if ( ports ) {
2167 // Parse the port names up to the first colon (:).
2168 size_t iColon = 0;
2169 do {
2170 port = (char *) ports[ nPorts ];
2171 iColon = port.find(":");
2172 if ( iColon != std::string::npos ) {
2173 port = port.substr( 0, iColon );
2174 if ( port != previousPort ) {
2175 if ( nDevices == device ) deviceName = port;
2176 nDevices++;
2177 previousPort = port;
2178 }
2179 }
2180 } while ( ports[++nPorts] );
2181 free( ports );
2182 }
2183
2184 if ( device >= nDevices ) {
2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2186 return FAILURE;
2187 }
2188
2189 // Count the available ports containing the client name as device
2190 // channels. Jack "input ports" equal RtAudio output channels.
2191 unsigned int nChannels = 0;
2192 unsigned long flag = JackPortIsInput;
2193 if ( mode == INPUT ) flag = JackPortIsOutput;
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2195 if ( ports ) {
2196 while ( ports[ nChannels ] ) nChannels++;
2197 free( ports );
2198 }
2199
2200 // Compare the jack ports for specified client to the requested number of channels.
2201 if ( nChannels < (channels + firstChannel) ) {
2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2203 errorText_ = errorStream_.str();
2204 return FAILURE;
2205 }
2206
2207 // Check the jack server sample rate.
2208 unsigned int jackRate = jack_get_sample_rate( client );
2209 if ( sampleRate != jackRate ) {
2210 jack_client_close( client );
2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2212 errorText_ = errorStream_.str();
2213 return FAILURE;
2214 }
2215 stream_.sampleRate = jackRate;
2216
2217 // Get the latency of the JACK port.
2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2219 if ( ports[ firstChannel ] ) {
2220 // Added by Ge Wang
2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2222 // the range (usually the min and max are equal)
2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2224 // get the latency range
2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2226 // be optimistic, use the min!
2227 stream_.latency[mode] = latrange.min;
2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2229 }
2230 free( ports );
2231
2232 // The jack server always uses 32-bit floating-point data.
2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2234 stream_.userFormat = format;
2235
2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2237 else stream_.userInterleaved = true;
2238
2239 // Jack always uses non-interleaved buffers.
2240 stream_.deviceInterleaved[mode] = false;
2241
2242 // Jack always provides host byte-ordered data.
2243 stream_.doByteSwap[mode] = false;
2244
2245 // Get the buffer size. The buffer size and number of buffers
2246 // (periods) is set when the jack server is started.
2247 stream_.bufferSize = (int) jack_get_buffer_size( client );
2248 *bufferSize = stream_.bufferSize;
2249
2250 stream_.nDeviceChannels[mode] = channels;
2251 stream_.nUserChannels[mode] = channels;
2252
2253 // Set flags for buffer conversion.
2254 stream_.doConvertBuffer[mode] = false;
2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2256 stream_.doConvertBuffer[mode] = true;
2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2258 stream_.nUserChannels[mode] > 1 )
2259 stream_.doConvertBuffer[mode] = true;
2260
2261 // Allocate our JackHandle structure for the stream.
2262 if ( handle == 0 ) {
2263 try {
2264 handle = new JackHandle;
2265 }
2266 catch ( std::bad_alloc& ) {
2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2268 goto error;
2269 }
2270
2271 if ( pthread_cond_init(&handle->condition, NULL) ) {
2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2273 goto error;
2274 }
2275 stream_.apiHandle = (void *) handle;
2276 handle->client = client;
2277 }
2278 handle->deviceName[mode] = deviceName;
2279
2280 // Allocate necessary internal buffers.
2281 unsigned long bufferBytes;
2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2284 if ( stream_.userBuffer[mode] == NULL ) {
2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2286 goto error;
2287 }
2288
2289 if ( stream_.doConvertBuffer[mode] ) {
2290
2291 bool makeBuffer = true;
2292 if ( mode == OUTPUT )
2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2294 else { // mode == INPUT
2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2298 if ( bufferBytes < bytesOut ) makeBuffer = false;
2299 }
2300 }
2301
2302 if ( makeBuffer ) {
2303 bufferBytes *= *bufferSize;
2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2306 if ( stream_.deviceBuffer == NULL ) {
2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2308 goto error;
2309 }
2310 }
2311 }
2312
2313 // Allocate memory for the Jack ports (channels) identifiers.
2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2315 if ( handle->ports[mode] == NULL ) {
2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2317 goto error;
2318 }
2319
2320 stream_.device[mode] = device;
2321 stream_.channelOffset[mode] = firstChannel;
2322 stream_.state = STREAM_STOPPED;
2323 stream_.callbackInfo.object = (void *) this;
2324
2325 if ( stream_.mode == OUTPUT && mode == INPUT )
2326 // We had already set up the stream for output.
2327 stream_.mode = DUPLEX;
2328 else {
2329 stream_.mode = mode;
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2333 }
2334
2335 // Register our ports.
2336 char label[64];
2337 if ( mode == OUTPUT ) {
2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2339 snprintf( label, 64, "outport %d", i );
2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2342 }
2343 }
2344 else {
2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2346 snprintf( label, 64, "inport %d", i );
2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2349 }
2350 }
2351
2352 // Setup the buffer conversion information structure. We don't use
2353 // buffers to do channel offsets, so we override that parameter
2354 // here.
2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2356
2357 return SUCCESS;
2358
2359 error:
2360 if ( handle ) {
2361 pthread_cond_destroy( &handle->condition );
2362 jack_client_close( handle->client );
2363
2364 if ( handle->ports[0] ) free( handle->ports[0] );
2365 if ( handle->ports[1] ) free( handle->ports[1] );
2366
2367 delete handle;
2368 stream_.apiHandle = 0;
2369 }
2370
2371 for ( int i=0; i<2; i++ ) {
2372 if ( stream_.userBuffer[i] ) {
2373 free( stream_.userBuffer[i] );
2374 stream_.userBuffer[i] = 0;
2375 }
2376 }
2377
2378 if ( stream_.deviceBuffer ) {
2379 free( stream_.deviceBuffer );
2380 stream_.deviceBuffer = 0;
2381 }
2382
2383 return FAILURE;
2384 }
2385
closeStream(void)2386 void RtApiJack :: closeStream( void )
2387 {
2388 if ( stream_.state == STREAM_CLOSED ) {
2389 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2390 error( RtAudioError::WARNING );
2391 return;
2392 }
2393
2394 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2395 if ( handle ) {
2396
2397 if ( stream_.state == STREAM_RUNNING )
2398 jack_deactivate( handle->client );
2399
2400 jack_client_close( handle->client );
2401 }
2402
2403 if ( handle ) {
2404 if ( handle->ports[0] ) free( handle->ports[0] );
2405 if ( handle->ports[1] ) free( handle->ports[1] );
2406 pthread_cond_destroy( &handle->condition );
2407 delete handle;
2408 stream_.apiHandle = 0;
2409 }
2410
2411 for ( int i=0; i<2; i++ ) {
2412 if ( stream_.userBuffer[i] ) {
2413 free( stream_.userBuffer[i] );
2414 stream_.userBuffer[i] = 0;
2415 }
2416 }
2417
2418 if ( stream_.deviceBuffer ) {
2419 free( stream_.deviceBuffer );
2420 stream_.deviceBuffer = 0;
2421 }
2422
2423 stream_.mode = UNINITIALIZED;
2424 stream_.state = STREAM_CLOSED;
2425 }
2426
// Activate the JACK client and connect our registered ports to the
// device's ports (output side first, then input).  On any failure,
// error( SYSTEM_ERROR ) is raised via the unlock label.
// NOTE(review): if a port connection fails after jack_activate()
// succeeded, the client is left activated — confirm this is intended.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2500
stopStream(void)2501 void RtApiJack :: stopStream( void )
2502 {
2503 verifyStream();
2504 if ( stream_.state == STREAM_STOPPED ) {
2505 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2506 error( RtAudioError::WARNING );
2507 return;
2508 }
2509
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2512
2513 if ( handle->drainCounter == 0 ) {
2514 handle->drainCounter = 2;
2515 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2516 }
2517 }
2518
2519 jack_deactivate( handle->client );
2520 stream_.state = STREAM_STOPPED;
2521 }
2522
abortStream(void)2523 void RtApiJack :: abortStream( void )
2524 {
2525 verifyStream();
2526 if ( stream_.state == STREAM_STOPPED ) {
2527 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2528 error( RtAudioError::WARNING );
2529 return;
2530 }
2531
2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2533 handle->drainCounter = 2;
2534
2535 stopStream();
2536 }
2537
2538 // This function will be called by a spawned thread when the user
2539 // callback function signals that the stream should be stopped or
2540 // aborted. It is necessary to handle it this way because the
2541 // callbackEvent() function must return before the jack_deactivate()
2542 // function will return.
jackStopStream(void * ptr)2543 static void *jackStopStream( void *ptr )
2544 {
2545 CallbackInfo *info = (CallbackInfo *) ptr;
2546 RtApiJack *object = (RtApiJack *) info->object;
2547
2548 object->stopStream();
2549 pthread_exit( NULL );
2550 }
2551
callbackEvent(unsigned long nframes)2552 bool RtApiJack :: callbackEvent( unsigned long nframes )
2553 {
2554 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2555 if ( stream_.state == STREAM_CLOSED ) {
2556 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2557 error( RtAudioError::WARNING );
2558 return FAILURE;
2559 }
2560 if ( stream_.bufferSize != nframes ) {
2561 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2562 error( RtAudioError::WARNING );
2563 return FAILURE;
2564 }
2565
2566 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2567 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2568
2569 // Check if we were draining the stream and signal is finished.
2570 if ( handle->drainCounter > 3 ) {
2571 ThreadHandle threadId;
2572
2573 stream_.state = STREAM_STOPPING;
2574 if ( handle->internalDrain == true )
2575 pthread_create( &threadId, NULL, jackStopStream, info );
2576 else
2577 pthread_cond_signal( &handle->condition );
2578 return SUCCESS;
2579 }
2580
2581 // Invoke user callback first, to get fresh output data.
2582 if ( handle->drainCounter == 0 ) {
2583 RtAudioCallback callback = (RtAudioCallback) info->callback;
2584 double streamTime = getStreamTime();
2585 RtAudioStreamStatus status = 0;
2586 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2587 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2588 handle->xrun[0] = false;
2589 }
2590 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2591 status |= RTAUDIO_INPUT_OVERFLOW;
2592 handle->xrun[1] = false;
2593 }
2594 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2595 stream_.bufferSize, streamTime, status, info->userData );
2596 if ( cbReturnValue == 2 ) {
2597 stream_.state = STREAM_STOPPING;
2598 handle->drainCounter = 2;
2599 ThreadHandle id;
2600 pthread_create( &id, NULL, jackStopStream, info );
2601 return SUCCESS;
2602 }
2603 else if ( cbReturnValue == 1 ) {
2604 handle->drainCounter = 1;
2605 handle->internalDrain = true;
2606 }
2607 }
2608
2609 jack_default_audio_sample_t *jackbuffer;
2610 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2611 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2612
2613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2614
2615 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2616 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2617 memset( jackbuffer, 0, bufferBytes );
2618 }
2619
2620 }
2621 else if ( stream_.doConvertBuffer[0] ) {
2622
2623 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2624
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2628 }
2629 }
2630 else { // no buffer conversion
2631 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2633 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2634 }
2635 }
2636 }
2637
2638 // Don't bother draining input
2639 if ( handle->drainCounter ) {
2640 handle->drainCounter++;
2641 goto unlock;
2642 }
2643
2644 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2645
2646 if ( stream_.doConvertBuffer[1] ) {
2647 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2648 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2649 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2650 }
2651 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2652 }
2653 else { // no buffer conversion
2654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2655 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2656 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2657 }
2658 }
2659 }
2660
2661 unlock:
2662 RtApi::tickStreamTime();
2663 return SUCCESS;
2664 }
2665 //******************** End of __UNIX_JACK__ *********************//
2666 #endif
2667
2668 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2669
2670 // The ASIO API is designed around a callback scheme, so this
2671 // implementation is similar to that used for OS-X CoreAudio and Linux
2672 // Jack. The primary constraint with ASIO is that it only allows
2673 // access to a single driver at a time. Thus, it is not possible to
2674 // have more than one simultaneous RtAudio stream.
2675 //
2676 // This implementation also requires a number of external ASIO files
2677 // and a few global variables. The ASIO callback scheme does not
2678 // allow for the passing of user data, so we must create a global
2679 // pointer to our callbackInfo structure.
2680 //
2681 // On unix systems, we make use of a pthread condition variable.
2682 // Since there is no equivalent in Windows, I hacked something based
2683 // on information found in
2684 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2685
2686 #include "asiosys.h"
2687 #include "asio.h"
2688 #include "iasiothiscallresolver.h"
2689 #include "asiodrivers.h"
2690 #include <cmath>
2691
// File-scope ASIO state. The ASIO callback scheme cannot carry user
// data, so these globals connect the driver callbacks back to the
// single open stream.
static AsioDrivers drivers;            // driver enumerator/loader
static ASIOCallbacks asioCallbacks;    // callback table handed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;      // info structure handed to ASIOInit()
static CallbackInfo *asioCallbackInfo; // points at the open stream's callbackInfo
static bool asioXRun;                  // over/underrun flag (presumably set from the driver message callback — confirm)
2697
2698 struct AsioHandle {
2699 int drainCounter; // Tracks callback counts when draining
2700 bool internalDrain; // Indicates if stop is initiated from callback or not.
2701 ASIOBufferInfo *bufferInfos;
2702 HANDLE condition;
2703
AsioHandleAsioHandle2704 AsioHandle()
2705 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2706 };
2707
// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );  // map an ASIOError code to text
static void sampleRateChanged( ASIOSampleRate sRate );      // driver sample-rate-change callback
static long asioMessages( long selector, long value, void* message, double* opt );  // driver message callback
2712
RtApiAsio()2713 RtApiAsio :: RtApiAsio()
2714 {
2715 // ASIO cannot run on a multi-threaded apartment. You can call
2716 // CoInitialize beforehand, but it must be for apartment threading
2717 // (in which case, CoInitilialize will return S_FALSE here).
2718 coInitialized_ = false;
2719 HRESULT hr = CoInitialize( NULL );
2720 if ( FAILED(hr) ) {
2721 errorText_ = "RtApiAsio::ASIO requires a single-threaded apartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2722 error( RtAudioError::WARNING );
2723 }
2724 coInitialized_ = true;
2725
2726 drivers.removeCurrentDriver();
2727 driverInfo.asioVersion = 2;
2728
2729 // See note in DirectSound implementation about GetDesktopWindow().
2730 driverInfo.sysRef = GetForegroundWindow();
2731 }
2732
// Destructor: close any still-open stream, then release the COM
// apartment acquired in the constructor (only if CoInitialize succeeded).
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
2738
getDeviceCount(void)2739 unsigned int RtApiAsio :: getDeviceCount( void )
2740 {
2741 return (unsigned int) drivers.asioGetNumDev();
2742 }
2743
// Probe and return capability information for the ASIO driver at the
// given index. Temporarily loads and initializes the driver to query
// channel counts, supported sample rates and the native data format,
// then unloads it. If a stream is already open (ASIO permits only one
// loaded driver), returns the results cached by saveDeviceInfo().
// On any failure, returns `info` with info.probed == false.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;  // pessimistic default; set true only on full success

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();  // unload before bailing out
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is the smaller of the two directions.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Preferred rate: the highest supported rate not exceeding 48 kHz
      // (or the first supported rate found, if none are <= 48 kHz).
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
    }
  }

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();  // unload before bailing out
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Map the ASIO sample type to the corresponding RtAudio format flag.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  drivers.removeCurrentDriver();  // unload the temporarily-loaded driver
  return info;
}
2862
bufferSwitch(long index,ASIOBool)2863 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2864 {
2865 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2866 object->callbackEvent( index );
2867 }
2868
saveDeviceInfo(void)2869 void RtApiAsio :: saveDeviceInfo( void )
2870 {
2871 devices_.clear();
2872
2873 unsigned int nDevices = getDeviceCount();
2874 devices_.resize( nDevices );
2875 for ( unsigned int i=0; i<nDevices; i++ )
2876 devices_[i] = getDeviceInfo( i );
2877 }
2878
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2879 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2880 unsigned int firstChannel, unsigned int sampleRate,
2881 RtAudioFormat format, unsigned int *bufferSize,
2882 RtAudio::StreamOptions *options )
2883 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2884
2885 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2886
2887 // For ASIO, a duplex stream MUST use the same driver.
2888 if ( isDuplexInput && stream_.device[0] != device ) {
2889 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2890 return FAILURE;
2891 }
2892
2893 char driverName[32];
2894 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2895 if ( result != ASE_OK ) {
2896 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2897 errorText_ = errorStream_.str();
2898 return FAILURE;
2899 }
2900
2901 // Only load the driver once for duplex stream.
2902 if ( !isDuplexInput ) {
2903 // The getDeviceInfo() function will not work when a stream is open
2904 // because ASIO does not allow multiple devices to run at the same
2905 // time. Thus, we'll probe the system before opening a stream and
2906 // save the results for use by getDeviceInfo().
2907 this->saveDeviceInfo();
2908
2909 if ( !drivers.loadDriver( driverName ) ) {
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2911 errorText_ = errorStream_.str();
2912 return FAILURE;
2913 }
2914
2915 result = ASIOInit( &driverInfo );
2916 if ( result != ASE_OK ) {
2917 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2918 errorText_ = errorStream_.str();
2919 return FAILURE;
2920 }
2921 }
2922
2923 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2924 bool buffersAllocated = false;
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2926 unsigned int nChannels;
2927
2928
2929 // Check the device channel count.
2930 long inputChannels, outputChannels;
2931 result = ASIOGetChannels( &inputChannels, &outputChannels );
2932 if ( result != ASE_OK ) {
2933 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2934 errorText_ = errorStream_.str();
2935 goto error;
2936 }
2937
2938 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2939 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2940 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2941 errorText_ = errorStream_.str();
2942 goto error;
2943 }
2944 stream_.nDeviceChannels[mode] = channels;
2945 stream_.nUserChannels[mode] = channels;
2946 stream_.channelOffset[mode] = firstChannel;
2947
2948 // Verify the sample rate is supported.
2949 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2950 if ( result != ASE_OK ) {
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2952 errorText_ = errorStream_.str();
2953 goto error;
2954 }
2955
2956 // Get the current sample rate
2957 ASIOSampleRate currentRate;
2958 result = ASIOGetSampleRate( ¤tRate );
2959 if ( result != ASE_OK ) {
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2961 errorText_ = errorStream_.str();
2962 goto error;
2963 }
2964
2965 // Set the sample rate only if necessary
2966 if ( currentRate != sampleRate ) {
2967 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2968 if ( result != ASE_OK ) {
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2970 errorText_ = errorStream_.str();
2971 goto error;
2972 }
2973 }
2974
2975 // Determine the driver data type.
2976 ASIOChannelInfo channelInfo;
2977 channelInfo.channel = 0;
2978 if ( mode == OUTPUT ) channelInfo.isInput = false;
2979 else channelInfo.isInput = true;
2980 result = ASIOGetChannelInfo( &channelInfo );
2981 if ( result != ASE_OK ) {
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2983 errorText_ = errorStream_.str();
2984 goto error;
2985 }
2986
2987 // Assuming WINDOWS host is always little-endian.
2988 stream_.doByteSwap[mode] = false;
2989 stream_.userFormat = format;
2990 stream_.deviceFormat[mode] = 0;
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2993 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2994 }
2995 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2997 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
2998 }
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3001 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3002 }
3003 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3005 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3006 }
3007 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3009 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3010 }
3011
3012 if ( stream_.deviceFormat[mode] == 0 ) {
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3014 errorText_ = errorStream_.str();
3015 goto error;
3016 }
3017
3018 // Set the buffer size. For a duplex stream, this will end up
3019 // setting the buffer size based on the input constraints, which
3020 // should be ok.
3021 long minSize, maxSize, preferSize, granularity;
3022 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3023 if ( result != ASE_OK ) {
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3025 errorText_ = errorStream_.str();
3026 goto error;
3027 }
3028
3029 if ( isDuplexInput ) {
3030 // When this is the duplex input (output was opened before), then we have to use the same
3031 // buffersize as the output, because it might use the preferred buffer size, which most
3032 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3033 // So instead of throwing an error, make them equal. The caller uses the reference
3034 // to the "bufferSize" param as usual to set up processing buffers.
3035
3036 *bufferSize = stream_.bufferSize;
3037
3038 } else {
3039 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3040 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3042 else if ( granularity == -1 ) {
3043 // Make sure bufferSize is a power of two.
3044 int log2_of_min_size = 0;
3045 int log2_of_max_size = 0;
3046
3047 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3048 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3049 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3050 }
3051
3052 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3053 int min_delta_num = log2_of_min_size;
3054
3055 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3056 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3057 if (current_delta < min_delta) {
3058 min_delta = current_delta;
3059 min_delta_num = i;
3060 }
3061 }
3062
3063 *bufferSize = ( (unsigned int)1 << min_delta_num );
3064 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3065 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3066 }
3067 else if ( granularity != 0 ) {
3068 // Set to an even multiple of granularity, rounding up.
3069 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3070 }
3071 }
3072
3073 /*
3074 // we don't use it anymore, see above!
3075 // Just left it here for the case...
3076 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3077 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3078 goto error;
3079 }
3080 */
3081
3082 stream_.bufferSize = *bufferSize;
3083 stream_.nBuffers = 2;
3084
3085 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3086 else stream_.userInterleaved = true;
3087
3088 // ASIO always uses non-interleaved buffers.
3089 stream_.deviceInterleaved[mode] = false;
3090
3091 // Allocate, if necessary, our AsioHandle structure for the stream.
3092 if ( handle == 0 ) {
3093 try {
3094 handle = new AsioHandle;
3095 }
3096 catch ( std::bad_alloc& ) {
3097 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3098 goto error;
3099 }
3100 handle->bufferInfos = 0;
3101
3102 // Create a manual-reset event.
3103 handle->condition = CreateEvent( NULL, // no security
3104 TRUE, // manual-reset
3105 FALSE, // non-signaled initially
3106 NULL ); // unnamed
3107 stream_.apiHandle = (void *) handle;
3108 }
3109
3110 // Create the ASIO internal buffers. Since RtAudio sets up input
3111 // and output separately, we'll have to dispose of previously
3112 // created output buffers for a duplex stream.
3113 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3114 ASIODisposeBuffers();
3115 if ( handle->bufferInfos ) free( handle->bufferInfos );
3116 }
3117
3118 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3119 unsigned int i;
3120 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3121 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3122 if ( handle->bufferInfos == NULL ) {
3123 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3124 errorText_ = errorStream_.str();
3125 goto error;
3126 }
3127
3128 ASIOBufferInfo *infos;
3129 infos = handle->bufferInfos;
3130 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3131 infos->isInput = ASIOFalse;
3132 infos->channelNum = i + stream_.channelOffset[0];
3133 infos->buffers[0] = infos->buffers[1] = 0;
3134 }
3135 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3136 infos->isInput = ASIOTrue;
3137 infos->channelNum = i + stream_.channelOffset[1];
3138 infos->buffers[0] = infos->buffers[1] = 0;
3139 }
3140
3141 // prepare for callbacks
3142 stream_.sampleRate = sampleRate;
3143 stream_.device[mode] = device;
3144 stream_.mode = isDuplexInput ? DUPLEX : mode;
3145
3146 // store this class instance before registering callbacks, that are going to use it
3147 asioCallbackInfo = &stream_.callbackInfo;
3148 stream_.callbackInfo.object = (void *) this;
3149
3150 // Set up the ASIO callback structure and create the ASIO data buffers.
3151 asioCallbacks.bufferSwitch = &bufferSwitch;
3152 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3153 asioCallbacks.asioMessage = &asioMessages;
3154 asioCallbacks.bufferSwitchTimeInfo = NULL;
3155 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3156 if ( result != ASE_OK ) {
3157 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3158 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3159 // in that case, let's be naïve and try that instead
3160 *bufferSize = preferSize;
3161 stream_.bufferSize = *bufferSize;
3162 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3163 }
3164
3165 if ( result != ASE_OK ) {
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3167 errorText_ = errorStream_.str();
3168 goto error;
3169 }
3170 buffersAllocated = true;
3171 stream_.state = STREAM_STOPPED;
3172
3173 // Set flags for buffer conversion.
3174 stream_.doConvertBuffer[mode] = false;
3175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3176 stream_.doConvertBuffer[mode] = true;
3177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3178 stream_.nUserChannels[mode] > 1 )
3179 stream_.doConvertBuffer[mode] = true;
3180
3181 // Allocate necessary internal buffers
3182 unsigned long bufferBytes;
3183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3185 if ( stream_.userBuffer[mode] == NULL ) {
3186 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3187 goto error;
3188 }
3189
3190 if ( stream_.doConvertBuffer[mode] ) {
3191
3192 bool makeBuffer = true;
3193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3194 if ( isDuplexInput && stream_.deviceBuffer ) {
3195 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3196 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3197 }
3198
3199 if ( makeBuffer ) {
3200 bufferBytes *= *bufferSize;
3201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3203 if ( stream_.deviceBuffer == NULL ) {
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3205 goto error;
3206 }
3207 }
3208 }
3209
3210 // Determine device latencies
3211 long inputLatency, outputLatency;
3212 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3213 if ( result != ASE_OK ) {
3214 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3215 errorText_ = errorStream_.str();
3216 error( RtAudioError::WARNING); // warn but don't fail
3217 }
3218 else {
3219 stream_.latency[0] = outputLatency;
3220 stream_.latency[1] = inputLatency;
3221 }
3222
3223 // Setup the buffer conversion information structure. We don't use
3224 // buffers to do channel offsets, so we override that parameter
3225 // here.
3226 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3227
3228 return SUCCESS;
3229
3230 error:
3231 if ( !isDuplexInput ) {
3232 // the cleanup for error in the duplex input, is done by RtApi::openStream
3233 // So we clean up for single channel only
3234
3235 if ( buffersAllocated )
3236 ASIODisposeBuffers();
3237
3238 drivers.removeCurrentDriver();
3239
3240 if ( handle ) {
3241 CloseHandle( handle->condition );
3242 if ( handle->bufferInfos )
3243 free( handle->bufferInfos );
3244
3245 delete handle;
3246 stream_.apiHandle = 0;
3247 }
3248
3249
3250 if ( stream_.userBuffer[mode] ) {
3251 free( stream_.userBuffer[mode] );
3252 stream_.userBuffer[mode] = 0;
3253 }
3254
3255 if ( stream_.deviceBuffer ) {
3256 free( stream_.deviceBuffer );
3257 stream_.deviceBuffer = 0;
3258 }
3259 }
3260
3261 return FAILURE;
3262 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3263
// Close the open ASIO stream: stop it if running, dispose the driver
// buffers, unload the driver and free all handle/user/device memory.
void RtApiAsio :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  if ( stream_.state == STREAM_RUNNING ) {
    // Mark stopped before ASIOStop() (presumably so the buffer-switch
    // callback sees the stopped state and bails out — confirm).
    stream_.state = STREAM_STOPPED;
    ASIOStop();
  }
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Free the per-stream handle created by probeDeviceOpen().
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free both user buffers (output = 0, input = 1).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
3303
// Cleared by startStream(); presumably set when a stop thread has been
// spawned elsewhere in this section — confirm against asioMessages()/stopThread.
bool stopThreadCalled = false;
3305
startStream()3306 void RtApiAsio :: startStream()
3307 {
3308 verifyStream();
3309 if ( stream_.state == STREAM_RUNNING ) {
3310 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3311 error( RtAudioError::WARNING );
3312 return;
3313 }
3314
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3316 ASIOError result = ASIOStart();
3317 if ( result != ASE_OK ) {
3318 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3319 errorText_ = errorStream_.str();
3320 goto unlock;
3321 }
3322
3323 handle->drainCounter = 0;
3324 handle->internalDrain = false;
3325 ResetEvent( handle->condition );
3326 stream_.state = STREAM_RUNNING;
3327 asioXRun = false;
3328
3329 unlock:
3330 stopThreadCalled = false;
3331
3332 if ( result == ASE_OK ) return;
3333 error( RtAudioError::SYSTEM_ERROR );
3334 }
3335
stopStream()3336 void RtApiAsio :: stopStream()
3337 {
3338 verifyStream();
3339 if ( stream_.state == STREAM_STOPPED ) {
3340 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3341 error( RtAudioError::WARNING );
3342 return;
3343 }
3344
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3347 if ( handle->drainCounter == 0 ) {
3348 handle->drainCounter = 2;
3349 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3350 }
3351 }
3352
3353 stream_.state = STREAM_STOPPED;
3354
3355 ASIOError result = ASIOStop();
3356 if ( result != ASE_OK ) {
3357 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3358 errorText_ = errorStream_.str();
3359 }
3360
3361 if ( result == ASE_OK ) return;
3362 error( RtAudioError::SYSTEM_ERROR );
3363 }
3364
abortStream()3365 void RtApiAsio :: abortStream()
3366 {
3367 verifyStream();
3368 if ( stream_.state == STREAM_STOPPED ) {
3369 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3370 error( RtAudioError::WARNING );
3371 return;
3372 }
3373
3374 // The following lines were commented-out because some behavior was
3375 // noted where the device buffers need to be zeroed to avoid
3376 // continuing sound, even when the device buffers are completely
3377 // disposed. So now, calling abort is the same as calling stop.
3378 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3379 // handle->drainCounter = 2;
3380 stopStream();
3381 }
3382
3383 // This function will be called by a spawned thread when the user
3384 // callback function signals that the stream should be stopped or
3385 // aborted. It is necessary to handle it this way because the
3386 // callbackEvent() function must return before the ASIOStop()
3387 // function will return.
asioStopStream(void * ptr)3388 static unsigned __stdcall asioStopStream( void *ptr )
3389 {
3390 CallbackInfo *info = (CallbackInfo *) ptr;
3391 RtApiAsio *object = (RtApiAsio *) info->object;
3392
3393 object->stopStream();
3394 _endthreadex( 0 );
3395 return 0;
3396 }
3397
// Service one ASIO buffer-switch notification for the half-buffer
// selected by bufferIndex (0 or 1 under ASIO's double-buffering):
// invoke the user callback, move audio between the user/device buffers
// and the driver's bufferInfos, and manage the stop/drain handshake.
// Returns SUCCESS unless called on a closed stream.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter is incremented once per callback while draining (see
  // below); > 3 means enough callbacks have elapsed for the output to
  // have flushed through both half-buffers.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );   // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report a pending xrun as underflow and/or overflow depending on
    // the stream direction, then clear the flag.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately (stop via helper thread).
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    // Callback return 1 = stop after draining the remaining output.
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per channel per buffer for the output device format.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user-format data into the device format, then
      // de-interleave channel j into each output bufferInfo.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Direct copy per input channel into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3551
sampleRateChanged(ASIOSampleRate sRate)3552 static void sampleRateChanged( ASIOSampleRate sRate )
3553 {
3554 // The ASIO documentation says that this usually only happens during
3555 // external sync. Audio processing is not stopped by the driver,
3556 // actual sample rate might not have even changed, maybe only the
3557 // sample rate status of an AES/EBU or S/PDIF digital input at the
3558 // audio device.
3559
3560 RtApi *object = (RtApi *) asioCallbackInfo->object;
3561 try {
3562 object->stopStream();
3563 }
3564 catch ( RtAudioError &exception ) {
3565 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3566 return;
3567 }
3568
3569 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3570 }
3571
asioMessages(long selector,long value,void *,double *)3572 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3573 {
3574 long ret = 0;
3575
3576 switch( selector ) {
3577 case kAsioSelectorSupported:
3578 if ( value == kAsioResetRequest
3579 || value == kAsioEngineVersion
3580 || value == kAsioResyncRequest
3581 || value == kAsioLatenciesChanged
3582 // The following three were added for ASIO 2.0, you don't
3583 // necessarily have to support them.
3584 || value == kAsioSupportsTimeInfo
3585 || value == kAsioSupportsTimeCode
3586 || value == kAsioSupportsInputMonitor)
3587 ret = 1L;
3588 break;
3589 case kAsioResetRequest:
3590 // Defer the task and perform the reset of the driver during the
3591 // next "safe" situation. You cannot reset the driver right now,
3592 // as this code is called from the driver. Reset the driver is
3593 // done by completely destruct is. I.e. ASIOStop(),
3594 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3595 // driver again.
3596 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3597 ret = 1L;
3598 break;
3599 case kAsioResyncRequest:
3600 // This informs the application that the driver encountered some
3601 // non-fatal data loss. It is used for synchronization purposes
3602 // of different media. Added mainly to work around the Win16Mutex
3603 // problems in Windows 95/98 with the Windows Multimedia system,
3604 // which could lose data because the Mutex was held too long by
3605 // another thread. However a driver can issue it in other
3606 // situations, too.
3607 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3608 asioXRun = true;
3609 ret = 1L;
3610 break;
3611 case kAsioLatenciesChanged:
3612 // This will inform the host application that the drivers were
3613 // latencies changed. Beware, it this does not mean that the
3614 // buffer sizes have changed! You might need to update internal
3615 // delay data.
3616 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3617 ret = 1L;
3618 break;
3619 case kAsioEngineVersion:
3620 // Return the supported ASIO version of the host application. If
3621 // a host application does not implement this selector, ASIO 1.0
3622 // is assumed by the driver.
3623 ret = 2L;
3624 break;
3625 case kAsioSupportsTimeInfo:
3626 // Informs the driver whether the
3627 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3628 // For compatibility with ASIO 1.0 drivers the host application
3629 // should always support the "old" bufferSwitch method, too.
3630 ret = 0;
3631 break;
3632 case kAsioSupportsTimeCode:
3633 // Informs the driver whether application is interested in time
3634 // code info. If an application does not need to know about time
3635 // code, the driver has less work to do.
3636 ret = 0;
3637 break;
3638 }
3639 return ret;
3640 }
3641
getAsioErrorString(ASIOError result)3642 static const char* getAsioErrorString( ASIOError result )
3643 {
3644 struct Messages
3645 {
3646 ASIOError value;
3647 const char*message;
3648 };
3649
3650 static const Messages m[] =
3651 {
3652 { ASE_NotPresent, "Hardware input or output is not present or available." },
3653 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3654 { ASE_InvalidParameter, "Invalid input parameter." },
3655 { ASE_InvalidMode, "Invalid mode." },
3656 { ASE_SPNotAdvancing, "Sample position not advancing." },
3657 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3658 { ASE_NoMemory, "Not enough memory to complete the request." }
3659 };
3660
3661 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3662 if ( m[i].value == result ) return m[i].message;
3663
3664 return "Unknown error.";
3665 }
3666
3667 //******************** End of __WINDOWS_ASIO__ *********************//
3668 #endif
3669
3670
3671 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3672
3673 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3674 // - Introduces support for the Windows WASAPI API
3675 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3676 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3677 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3678
3679 #ifndef INITGUID
3680 #define INITGUID
3681 #endif
3682 #include <audioclient.h>
3683 #include <avrt.h>
3684 #include <mmdeviceapi.h>
3685 #include <functiondiscoverykeys_devpkey.h>
3686 #include <math.h>
3687
3688 //=============================================================================
3689
// Release a COM interface pointer (if non-NULL) and null it out.
// Wrapped in do { } while ( 0 ) so the macro expands to exactly one
// statement and stays well-formed in all contexts, e.g. an unbraced
// "if ( cond ) SAFE_RELEASE( p ); else ..." — the previous bare-block
// form broke there (dangling-else / stray-semicolon hazard).
#define SAFE_RELEASE( objectPtr )\
  do {\
    if ( objectPtr )\
    {\
      objectPtr->Release();\
      objectPtr = NULL;\
    }\
  } while ( 0 )
3696
// Function-pointer type matching AvSetMmThreadCharacteristics (avrt.dll);
// presumably used to resolve the symbol at runtime (GetProcAddress) so the
// thread-priority boost is optional — confirm at the usage site later in
// this file.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3698
3699 //-----------------------------------------------------------------------------
3700
3701 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3702 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3703 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3704 // provide intermediate storage for read / write synchronization.
class WasapiBuffer
{
public:
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}

  ~WasapiBuffer() {
    free( buffer_ );
  }

  // sets the length of the internal ring buffer
  // bufferSize is a sample count and formatBytes the size of one sample;
  // any previously held data is discarded and both indices reset.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    free( buffer_ );

    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }

  // attempt to push a buffer into the ring buffer at the current "in" index
  // Returns false (without copying anything) when buffer is NULL, empty,
  // larger than the ring, or there is not enough free space.
  // NOTE(review): bufferSize and the indices are in samples of `format`;
  // a given WasapiBuffer must always be pushed/pulled with the same
  // format for the typed-pointer indexing below to be consistent.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "out" index so the overlap test below works when the
    // incoming region wraps past the end of the ring.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // "in" index can end on the "out" index but cannot begin at it
    if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromZeroSize is the portion (if any) that wraps to the start of
    // the ring; fromInSize is the portion written at inIndex_.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
      }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }

  // attempt to pull a buffer from the ring buffer from the current "out" index
  // Returns false (without copying anything) when buffer is NULL, empty,
  // larger than the ring, or not enough data is available.  Note the
  // deliberate asymmetry with pushBuffer: "out" MAY begin at the "in"
  // index (strict < below), while "in" may not begin at "out".
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index so the overlap test below works when the
    // outgoing region wraps past the end of the ring.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // "out" index can begin at and end on the "in" index
    if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    // fromZeroSize is the wrapped tail read from the start of the ring.
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }

private:
  char* buffer_;            // ring storage (bufferSize_ samples)
  unsigned int bufferSize_; // ring capacity, in samples
  unsigned int inIndex_;    // next write position, in samples
  unsigned int outIndex_;   // next read position, in samples
};
3857
3858 //-----------------------------------------------------------------------------
3859
3860 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3861 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3862 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3863 // This sample rate converter favors speed over quality, and works best with conversions between
3864 // one rate and its multiple.
convertBufferWasapi(char * outBuffer,const char * inBuffer,const unsigned int & channelCount,const unsigned int & inSampleRate,const unsigned int & outSampleRate,const unsigned int & inSampleCount,unsigned int & outSampleCount,const RtAudioFormat & format)3865 void convertBufferWasapi( char* outBuffer,
3866 const char* inBuffer,
3867 const unsigned int& channelCount,
3868 const unsigned int& inSampleRate,
3869 const unsigned int& outSampleRate,
3870 const unsigned int& inSampleCount,
3871 unsigned int& outSampleCount,
3872 const RtAudioFormat& format )
3873 {
3874 // calculate the new outSampleCount and relative sampleStep
3875 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3876 float sampleStep = 1.0f / sampleRatio;
3877 float inSampleFraction = 0.0f;
3878
3879 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
3880
3881 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3882 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3883 {
3884 unsigned int inSample = ( unsigned int ) inSampleFraction;
3885
3886 switch ( format )
3887 {
3888 case RTAUDIO_SINT8:
3889 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3890 break;
3891 case RTAUDIO_SINT16:
3892 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3893 break;
3894 case RTAUDIO_SINT24:
3895 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3896 break;
3897 case RTAUDIO_SINT32:
3898 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3899 break;
3900 case RTAUDIO_FLOAT32:
3901 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3902 break;
3903 case RTAUDIO_FLOAT64:
3904 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3905 break;
3906 }
3907
3908 // jump to next in sample
3909 inSampleFraction += sampleStep;
3910 }
3911 }
3912
3913 //-----------------------------------------------------------------------------
3914
3915 // A structure to hold various information related to the WASAPI implementation.
3916 struct WasapiHandle
3917 {
3918 IAudioClient* captureAudioClient;
3919 IAudioClient* renderAudioClient;
3920 IAudioCaptureClient* captureClient;
3921 IAudioRenderClient* renderClient;
3922 HANDLE captureEvent;
3923 HANDLE renderEvent;
3924
WasapiHandleWasapiHandle3925 WasapiHandle()
3926 : captureAudioClient( NULL ),
3927 renderAudioClient( NULL ),
3928 captureClient( NULL ),
3929 renderClient( NULL ),
3930 captureEvent( NULL ),
3931 renderEvent( NULL ) {}
3932 };
3933
3934 //=============================================================================
3935
RtApiWasapi()3936 RtApiWasapi::RtApiWasapi()
3937 : coInitialized_( false ), deviceEnumerator_( NULL )
3938 {
3939 // WASAPI can run either apartment or multi-threaded
3940 HRESULT hr = CoInitialize( NULL );
3941 if ( !FAILED( hr ) )
3942 coInitialized_ = true;
3943
3944 // Instantiate device enumerator
3945 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3946 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3947 ( void** ) &deviceEnumerator_ );
3948
3949 if ( FAILED( hr ) ) {
3950 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3951 error( RtAudioError::DRIVER_ERROR );
3952 }
3953 }
3954
3955 //-----------------------------------------------------------------------------
3956
~RtApiWasapi()3957 RtApiWasapi::~RtApiWasapi()
3958 {
3959 if ( stream_.state != STREAM_CLOSED )
3960 closeStream();
3961
3962 SAFE_RELEASE( deviceEnumerator_ );
3963
3964 // If this object previously called CoInitialize()
3965 if ( coInitialized_ )
3966 CoUninitialize();
3967 }
3968
3969 //=============================================================================
3970
// Return the total number of active WASAPI endpoints (capture devices
// plus render devices).  On any enumeration failure, raises a
// DRIVER_ERROR and returns 0.
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // Count capture devices
  // errorText_ doubles as the success flag checked at Exit below.
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4017
4018 //-----------------------------------------------------------------------------
4019
getDeviceInfo(unsigned int device)4020 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4021 {
4022 RtAudio::DeviceInfo info;
4023 unsigned int captureDeviceCount = 0;
4024 unsigned int renderDeviceCount = 0;
4025 std::string defaultDeviceName;
4026 bool isCaptureDevice = false;
4027
4028 PROPVARIANT deviceNameProp;
4029 PROPVARIANT defaultDeviceNameProp;
4030
4031 IMMDeviceCollection* captureDevices = NULL;
4032 IMMDeviceCollection* renderDevices = NULL;
4033 IMMDevice* devicePtr = NULL;
4034 IMMDevice* defaultDevicePtr = NULL;
4035 IAudioClient* audioClient = NULL;
4036 IPropertyStore* devicePropStore = NULL;
4037 IPropertyStore* defaultDevicePropStore = NULL;
4038
4039 WAVEFORMATEX* deviceFormat = NULL;
4040 WAVEFORMATEX* closestMatchFormat = NULL;
4041
4042 // probed
4043 info.probed = false;
4044
4045 // Count capture devices
4046 errorText_.clear();
4047 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4048 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4049 if ( FAILED( hr ) ) {
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4051 goto Exit;
4052 }
4053
4054 hr = captureDevices->GetCount( &captureDeviceCount );
4055 if ( FAILED( hr ) ) {
4056 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4057 goto Exit;
4058 }
4059
4060 // Count render devices
4061 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4062 if ( FAILED( hr ) ) {
4063 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4064 goto Exit;
4065 }
4066
4067 hr = renderDevices->GetCount( &renderDeviceCount );
4068 if ( FAILED( hr ) ) {
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4070 goto Exit;
4071 }
4072
4073 // validate device index
4074 if ( device >= captureDeviceCount + renderDeviceCount ) {
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4076 errorType = RtAudioError::INVALID_USE;
4077 goto Exit;
4078 }
4079
4080 // determine whether index falls within capture or render devices
4081 if ( device >= renderDeviceCount ) {
4082 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4083 if ( FAILED( hr ) ) {
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4085 goto Exit;
4086 }
4087 isCaptureDevice = true;
4088 }
4089 else {
4090 hr = renderDevices->Item( device, &devicePtr );
4091 if ( FAILED( hr ) ) {
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4093 goto Exit;
4094 }
4095 isCaptureDevice = false;
4096 }
4097
4098 // get default device name
4099 if ( isCaptureDevice ) {
4100 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4101 if ( FAILED( hr ) ) {
4102 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4103 goto Exit;
4104 }
4105 }
4106 else {
4107 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4108 if ( FAILED( hr ) ) {
4109 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4110 goto Exit;
4111 }
4112 }
4113
4114 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4115 if ( FAILED( hr ) ) {
4116 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4117 goto Exit;
4118 }
4119 PropVariantInit( &defaultDeviceNameProp );
4120
4121 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4122 if ( FAILED( hr ) ) {
4123 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4124 goto Exit;
4125 }
4126
4127 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4128
4129 // name
4130 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4131 if ( FAILED( hr ) ) {
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4133 goto Exit;
4134 }
4135
4136 PropVariantInit( &deviceNameProp );
4137
4138 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4139 if ( FAILED( hr ) ) {
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4141 goto Exit;
4142 }
4143
4144 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4145
4146 // is default
4147 if ( isCaptureDevice ) {
4148 info.isDefaultInput = info.name == defaultDeviceName;
4149 info.isDefaultOutput = false;
4150 }
4151 else {
4152 info.isDefaultInput = false;
4153 info.isDefaultOutput = info.name == defaultDeviceName;
4154 }
4155
4156 // channel count
4157 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4158 if ( FAILED( hr ) ) {
4159 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4160 goto Exit;
4161 }
4162
4163 hr = audioClient->GetMixFormat( &deviceFormat );
4164 if ( FAILED( hr ) ) {
4165 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4166 goto Exit;
4167 }
4168
4169 if ( isCaptureDevice ) {
4170 info.inputChannels = deviceFormat->nChannels;
4171 info.outputChannels = 0;
4172 info.duplexChannels = 0;
4173 }
4174 else {
4175 info.inputChannels = 0;
4176 info.outputChannels = deviceFormat->nChannels;
4177 info.duplexChannels = 0;
4178 }
4179
4180 // sample rates
4181 info.sampleRates.clear();
4182
4183 // allow support for all sample rates as we have a built-in sample rate converter
4184 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4185 info.sampleRates.push_back( SAMPLE_RATES[i] );
4186 }
4187 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4188
4189 // native format
4190 info.nativeFormats = 0;
4191
4192 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4193 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4194 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4195 {
4196 if ( deviceFormat->wBitsPerSample == 32 ) {
4197 info.nativeFormats |= RTAUDIO_FLOAT32;
4198 }
4199 else if ( deviceFormat->wBitsPerSample == 64 ) {
4200 info.nativeFormats |= RTAUDIO_FLOAT64;
4201 }
4202 }
4203 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4204 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4205 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4206 {
4207 if ( deviceFormat->wBitsPerSample == 8 ) {
4208 info.nativeFormats |= RTAUDIO_SINT8;
4209 }
4210 else if ( deviceFormat->wBitsPerSample == 16 ) {
4211 info.nativeFormats |= RTAUDIO_SINT16;
4212 }
4213 else if ( deviceFormat->wBitsPerSample == 24 ) {
4214 info.nativeFormats |= RTAUDIO_SINT24;
4215 }
4216 else if ( deviceFormat->wBitsPerSample == 32 ) {
4217 info.nativeFormats |= RTAUDIO_SINT32;
4218 }
4219 }
4220
4221 // probed
4222 info.probed = true;
4223
4224 Exit:
4225 // release all references
4226 PropVariantClear( &deviceNameProp );
4227 PropVariantClear( &defaultDeviceNameProp );
4228
4229 SAFE_RELEASE( captureDevices );
4230 SAFE_RELEASE( renderDevices );
4231 SAFE_RELEASE( devicePtr );
4232 SAFE_RELEASE( defaultDevicePtr );
4233 SAFE_RELEASE( audioClient );
4234 SAFE_RELEASE( devicePropStore );
4235 SAFE_RELEASE( defaultDevicePropStore );
4236
4237 CoTaskMemFree( deviceFormat );
4238 CoTaskMemFree( closestMatchFormat );
4239
4240 if ( !errorText_.empty() )
4241 error( errorType );
4242 return info;
4243 }
4244
4245 //-----------------------------------------------------------------------------
4246
getDefaultOutputDevice(void)4247 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4248 {
4249 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4250 if ( getDeviceInfo( i ).isDefaultOutput ) {
4251 return i;
4252 }
4253 }
4254
4255 return 0;
4256 }
4257
4258 //-----------------------------------------------------------------------------
4259
getDefaultInputDevice(void)4260 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4261 {
4262 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4263 if ( getDeviceInfo( i ).isDefaultInput ) {
4264 return i;
4265 }
4266 }
4267
4268 return 0;
4269 }
4270
4271 //-----------------------------------------------------------------------------
4272
closeStream(void)4273 void RtApiWasapi::closeStream( void )
4274 {
4275 if ( stream_.state == STREAM_CLOSED ) {
4276 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4277 error( RtAudioError::WARNING );
4278 return;
4279 }
4280
4281 if ( stream_.state != STREAM_STOPPED )
4282 stopStream();
4283
4284 // clean up stream memory
4285 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4286 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4287
4288 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4289 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4290
4291 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4292 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4293
4294 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4295 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4296
4297 delete ( WasapiHandle* ) stream_.apiHandle;
4298 stream_.apiHandle = NULL;
4299
4300 for ( int i = 0; i < 2; i++ ) {
4301 if ( stream_.userBuffer[i] ) {
4302 free( stream_.userBuffer[i] );
4303 stream_.userBuffer[i] = 0;
4304 }
4305 }
4306
4307 if ( stream_.deviceBuffer ) {
4308 free( stream_.deviceBuffer );
4309 stream_.deviceBuffer = 0;
4310 }
4311
4312 // update stream state
4313 stream_.state = STREAM_CLOSED;
4314 }
4315
4316 //-----------------------------------------------------------------------------
4317
startStream(void)4318 void RtApiWasapi::startStream( void )
4319 {
4320 verifyStream();
4321
4322 if ( stream_.state == STREAM_RUNNING ) {
4323 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4324 error( RtAudioError::WARNING );
4325 return;
4326 }
4327
4328 // update stream state
4329 stream_.state = STREAM_RUNNING;
4330
4331 // create WASAPI stream thread
4332 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4333
4334 if ( !stream_.callbackInfo.thread ) {
4335 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4336 error( RtAudioError::THREAD_ERROR );
4337 }
4338 else {
4339 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4340 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4341 }
4342 }
4343
4344 //-----------------------------------------------------------------------------
4345
//! Stops a running stream after draining the last buffer: signals the
//! processing thread via the state flag, waits for it to exit its loop,
//! stops the WASAPI capture/render clients, and closes the thread handle.
//! Issues a warning (and returns) if the stream is already stopped.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (busy-wait: wasapiThread sets state to STREAM_STOPPED when its
  // processing loop exits)
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  // bufferSize frames at sampleRate frames/sec -> milliseconds
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // stop capture client if applicable
  // NOTE: on failure the thread handle below is intentionally left open;
  // the early return preserves the handle for diagnosis/retry.
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4396
4397 //-----------------------------------------------------------------------------
4398
//! Aborts a running stream immediately: identical to stopStream() except
//! that it does NOT sleep to let the last buffer finish playing. Signals
//! the processing thread via the state flag, waits for it to exit, stops
//! the WASAPI capture/render clients, and closes the thread handle.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (busy-wait: wasapiThread sets state to STREAM_STOPPED when its
  // processing loop exits)
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  // NOTE: on failure the thread handle below is intentionally left open;
  // the early return preserves the handle for diagnosis/retry.
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4446
4447 //-----------------------------------------------------------------------------
4448
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4449 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4450 unsigned int firstChannel, unsigned int sampleRate,
4451 RtAudioFormat format, unsigned int* bufferSize,
4452 RtAudio::StreamOptions* options )
4453 {
4454 bool methodResult = FAILURE;
4455 unsigned int captureDeviceCount = 0;
4456 unsigned int renderDeviceCount = 0;
4457
4458 IMMDeviceCollection* captureDevices = NULL;
4459 IMMDeviceCollection* renderDevices = NULL;
4460 IMMDevice* devicePtr = NULL;
4461 WAVEFORMATEX* deviceFormat = NULL;
4462 unsigned int bufferBytes;
4463 stream_.state = STREAM_STOPPED;
4464
4465 // create API Handle if not already created
4466 if ( !stream_.apiHandle )
4467 stream_.apiHandle = ( void* ) new WasapiHandle();
4468
4469 // Count capture devices
4470 errorText_.clear();
4471 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4472 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4473 if ( FAILED( hr ) ) {
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4475 goto Exit;
4476 }
4477
4478 hr = captureDevices->GetCount( &captureDeviceCount );
4479 if ( FAILED( hr ) ) {
4480 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4481 goto Exit;
4482 }
4483
4484 // Count render devices
4485 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4486 if ( FAILED( hr ) ) {
4487 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4488 goto Exit;
4489 }
4490
4491 hr = renderDevices->GetCount( &renderDeviceCount );
4492 if ( FAILED( hr ) ) {
4493 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4494 goto Exit;
4495 }
4496
4497 // validate device index
4498 if ( device >= captureDeviceCount + renderDeviceCount ) {
4499 errorType = RtAudioError::INVALID_USE;
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4501 goto Exit;
4502 }
4503
4504 // determine whether index falls within capture or render devices
4505 if ( device >= renderDeviceCount ) {
4506 if ( mode != INPUT ) {
4507 errorType = RtAudioError::INVALID_USE;
4508 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4509 goto Exit;
4510 }
4511
4512 // retrieve captureAudioClient from devicePtr
4513 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4514
4515 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4516 if ( FAILED( hr ) ) {
4517 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4518 goto Exit;
4519 }
4520
4521 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4522 NULL, ( void** ) &captureAudioClient );
4523 if ( FAILED( hr ) ) {
4524 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4525 goto Exit;
4526 }
4527
4528 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4529 if ( FAILED( hr ) ) {
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4531 goto Exit;
4532 }
4533
4534 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4535 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4536 }
4537 else {
4538 if ( mode != OUTPUT ) {
4539 errorType = RtAudioError::INVALID_USE;
4540 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4541 goto Exit;
4542 }
4543
4544 // retrieve renderAudioClient from devicePtr
4545 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4546
4547 hr = renderDevices->Item( device, &devicePtr );
4548 if ( FAILED( hr ) ) {
4549 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4550 goto Exit;
4551 }
4552
4553 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4554 NULL, ( void** ) &renderAudioClient );
4555 if ( FAILED( hr ) ) {
4556 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4557 goto Exit;
4558 }
4559
4560 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4561 if ( FAILED( hr ) ) {
4562 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4563 goto Exit;
4564 }
4565
4566 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4567 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4568 }
4569
4570 // fill stream data
4571 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4572 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4573 stream_.mode = DUPLEX;
4574 }
4575 else {
4576 stream_.mode = mode;
4577 }
4578
4579 stream_.device[mode] = device;
4580 stream_.doByteSwap[mode] = false;
4581 stream_.sampleRate = sampleRate;
4582 stream_.bufferSize = *bufferSize;
4583 stream_.nBuffers = 1;
4584 stream_.nUserChannels[mode] = channels;
4585 stream_.channelOffset[mode] = firstChannel;
4586 stream_.userFormat = format;
4587 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4588
4589 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4590 stream_.userInterleaved = false;
4591 else
4592 stream_.userInterleaved = true;
4593 stream_.deviceInterleaved[mode] = true;
4594
4595 // Set flags for buffer conversion.
4596 stream_.doConvertBuffer[mode] = false;
4597 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4598 stream_.nUserChannels != stream_.nDeviceChannels )
4599 stream_.doConvertBuffer[mode] = true;
4600 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4601 stream_.nUserChannels[mode] > 1 )
4602 stream_.doConvertBuffer[mode] = true;
4603
4604 if ( stream_.doConvertBuffer[mode] )
4605 setConvertInfo( mode, 0 );
4606
4607 // Allocate necessary internal buffers
4608 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4609
4610 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4611 if ( !stream_.userBuffer[mode] ) {
4612 errorType = RtAudioError::MEMORY_ERROR;
4613 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4614 goto Exit;
4615 }
4616
4617 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4618 stream_.callbackInfo.priority = 15;
4619 else
4620 stream_.callbackInfo.priority = 0;
4621
4622 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4623 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4624
4625 methodResult = SUCCESS;
4626
4627 Exit:
4628 //clean up
4629 SAFE_RELEASE( captureDevices );
4630 SAFE_RELEASE( renderDevices );
4631 SAFE_RELEASE( devicePtr );
4632 CoTaskMemFree( deviceFormat );
4633
4634 // if method failed, close the stream
4635 if ( methodResult == FAILURE )
4636 closeStream();
4637
4638 if ( !errorText_.empty() )
4639 error( errorType );
4640 return methodResult;
4641 }
4642
4643 //=============================================================================
4644
runWasapiThread(void * wasapiPtr)4645 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4646 {
4647 if ( wasapiPtr )
4648 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4649
4650 return 0;
4651 }
4652
stopWasapiThread(void * wasapiPtr)4653 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4654 {
4655 if ( wasapiPtr )
4656 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4657
4658 return 0;
4659 }
4660
abortWasapiThread(void * wasapiPtr)4661 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4662 {
4663 if ( wasapiPtr )
4664 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4665
4666 return 0;
4667 }
4668
4669 //-----------------------------------------------------------------------------
4670
wasapiThread()4671 void RtApiWasapi::wasapiThread()
4672 {
4673 // as this is a new thread, we must CoInitialize it
4674 CoInitialize( NULL );
4675
4676 HRESULT hr;
4677
4678 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4679 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4680 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4681 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4682 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4683 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4684
4685 WAVEFORMATEX* captureFormat = NULL;
4686 WAVEFORMATEX* renderFormat = NULL;
4687 float captureSrRatio = 0.0f;
4688 float renderSrRatio = 0.0f;
4689 WasapiBuffer captureBuffer;
4690 WasapiBuffer renderBuffer;
4691
4692 // declare local stream variables
4693 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4694 BYTE* streamBuffer = NULL;
4695 unsigned long captureFlags = 0;
4696 unsigned int bufferFrameCount = 0;
4697 unsigned int numFramesPadding = 0;
4698 unsigned int convBufferSize = 0;
4699 bool callbackPushed = false;
4700 bool callbackPulled = false;
4701 bool callbackStopped = false;
4702 int callbackResult = 0;
4703
4704 // convBuffer is used to store converted buffers between WASAPI and the user
4705 char* convBuffer = NULL;
4706 unsigned int convBuffSize = 0;
4707 unsigned int deviceBuffSize = 0;
4708
4709 errorText_.clear();
4710 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4711
4712 // Attempt to assign "Pro Audio" characteristic to thread
4713 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4714 if ( AvrtDll ) {
4715 DWORD taskIndex = 0;
4716 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4717 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4718 FreeLibrary( AvrtDll );
4719 }
4720
4721 // start capture stream if applicable
4722 if ( captureAudioClient ) {
4723 hr = captureAudioClient->GetMixFormat( &captureFormat );
4724 if ( FAILED( hr ) ) {
4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4726 goto Exit;
4727 }
4728
4729 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4730
4731 // initialize capture stream according to desire buffer size
4732 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4733 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4734
4735 if ( !captureClient ) {
4736 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4737 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4738 desiredBufferPeriod,
4739 desiredBufferPeriod,
4740 captureFormat,
4741 NULL );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4744 goto Exit;
4745 }
4746
4747 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4748 ( void** ) &captureClient );
4749 if ( FAILED( hr ) ) {
4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4751 goto Exit;
4752 }
4753
4754 // configure captureEvent to trigger on every available capture buffer
4755 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4756 if ( !captureEvent ) {
4757 errorType = RtAudioError::SYSTEM_ERROR;
4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4759 goto Exit;
4760 }
4761
4762 hr = captureAudioClient->SetEventHandle( captureEvent );
4763 if ( FAILED( hr ) ) {
4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4765 goto Exit;
4766 }
4767
4768 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4769 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4770 }
4771
4772 unsigned int inBufferSize = 0;
4773 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4774 if ( FAILED( hr ) ) {
4775 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4776 goto Exit;
4777 }
4778
4779 // scale outBufferSize according to stream->user sample rate ratio
4780 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4781 inBufferSize *= stream_.nDeviceChannels[INPUT];
4782
4783 // set captureBuffer size
4784 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4785
4786 // reset the capture stream
4787 hr = captureAudioClient->Reset();
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4790 goto Exit;
4791 }
4792
4793 // start the capture stream
4794 hr = captureAudioClient->Start();
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4797 goto Exit;
4798 }
4799 }
4800
4801 // start render stream if applicable
4802 if ( renderAudioClient ) {
4803 hr = renderAudioClient->GetMixFormat( &renderFormat );
4804 if ( FAILED( hr ) ) {
4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4806 goto Exit;
4807 }
4808
4809 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4810
4811 // initialize render stream according to desire buffer size
4812 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4813 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4814
4815 if ( !renderClient ) {
4816 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4817 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4818 desiredBufferPeriod,
4819 desiredBufferPeriod,
4820 renderFormat,
4821 NULL );
4822 if ( FAILED( hr ) ) {
4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4824 goto Exit;
4825 }
4826
4827 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4828 ( void** ) &renderClient );
4829 if ( FAILED( hr ) ) {
4830 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4831 goto Exit;
4832 }
4833
4834 // configure renderEvent to trigger on every available render buffer
4835 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4836 if ( !renderEvent ) {
4837 errorType = RtAudioError::SYSTEM_ERROR;
4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4839 goto Exit;
4840 }
4841
4842 hr = renderAudioClient->SetEventHandle( renderEvent );
4843 if ( FAILED( hr ) ) {
4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4845 goto Exit;
4846 }
4847
4848 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4849 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4850 }
4851
4852 unsigned int outBufferSize = 0;
4853 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4854 if ( FAILED( hr ) ) {
4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4856 goto Exit;
4857 }
4858
4859 // scale inBufferSize according to user->stream sample rate ratio
4860 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4861 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4862
4863 // set renderBuffer size
4864 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4865
4866 // reset the render stream
4867 hr = renderAudioClient->Reset();
4868 if ( FAILED( hr ) ) {
4869 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4870 goto Exit;
4871 }
4872
4873 // start the render stream
4874 hr = renderAudioClient->Start();
4875 if ( FAILED( hr ) ) {
4876 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4877 goto Exit;
4878 }
4879 }
4880
4881 if ( stream_.mode == INPUT ) {
4882 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4883 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4884 }
4885 else if ( stream_.mode == OUTPUT ) {
4886 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4887 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4888 }
4889 else if ( stream_.mode == DUPLEX ) {
4890 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4891 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4892 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4893 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4894 }
4895
4896 convBuffer = ( char* ) malloc( convBuffSize );
4897 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4898 if ( !convBuffer || !stream_.deviceBuffer ) {
4899 errorType = RtAudioError::MEMORY_ERROR;
4900 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4901 goto Exit;
4902 }
4903
4904 // stream process loop
4905 while ( stream_.state != STREAM_STOPPING ) {
4906 if ( !callbackPulled ) {
4907 // Callback Input
4908 // ==============
4909 // 1. Pull callback buffer from inputBuffer
4910 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4911 // Convert callback buffer to user format
4912
4913 if ( captureAudioClient ) {
4914 // Pull callback buffer from inputBuffer
4915 callbackPulled = captureBuffer.pullBuffer( convBuffer,
4916 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
4917 stream_.deviceFormat[INPUT] );
4918
4919 if ( callbackPulled ) {
4920 // Convert callback buffer to user sample rate
4921 convertBufferWasapi( stream_.deviceBuffer,
4922 convBuffer,
4923 stream_.nDeviceChannels[INPUT],
4924 captureFormat->nSamplesPerSec,
4925 stream_.sampleRate,
4926 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
4927 convBufferSize,
4928 stream_.deviceFormat[INPUT] );
4929
4930 if ( stream_.doConvertBuffer[INPUT] ) {
4931 // Convert callback buffer to user format
4932 convertBuffer( stream_.userBuffer[INPUT],
4933 stream_.deviceBuffer,
4934 stream_.convertInfo[INPUT] );
4935 }
4936 else {
4937 // no further conversion, simple copy deviceBuffer to userBuffer
4938 memcpy( stream_.userBuffer[INPUT],
4939 stream_.deviceBuffer,
4940 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4941 }
4942 }
4943 }
4944 else {
4945 // if there is no capture stream, set callbackPulled flag
4946 callbackPulled = true;
4947 }
4948
4949 // Execute Callback
4950 // ================
4951 // 1. Execute user callback method
4952 // 2. Handle return value from callback
4953
4954 // if callback has not requested the stream to stop
4955 if ( callbackPulled && !callbackStopped ) {
4956 // Execute user callback method
4957 callbackResult = callback( stream_.userBuffer[OUTPUT],
4958 stream_.userBuffer[INPUT],
4959 stream_.bufferSize,
4960 getStreamTime(),
4961 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4962 stream_.callbackInfo.userData );
4963
4964 // Handle return value from callback
4965 if ( callbackResult == 1 ) {
4966 // instantiate a thread to stop this thread
4967 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4968 if ( !threadHandle ) {
4969 errorType = RtAudioError::THREAD_ERROR;
4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4971 goto Exit;
4972 }
4973 else if ( !CloseHandle( threadHandle ) ) {
4974 errorType = RtAudioError::THREAD_ERROR;
4975 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4976 goto Exit;
4977 }
4978
4979 callbackStopped = true;
4980 }
4981 else if ( callbackResult == 2 ) {
4982 // instantiate a thread to stop this thread
4983 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4984 if ( !threadHandle ) {
4985 errorType = RtAudioError::THREAD_ERROR;
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4987 goto Exit;
4988 }
4989 else if ( !CloseHandle( threadHandle ) ) {
4990 errorType = RtAudioError::THREAD_ERROR;
4991 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4992 goto Exit;
4993 }
4994
4995 callbackStopped = true;
4996 }
4997 }
4998 }
4999
5000 // Callback Output
5001 // ===============
5002 // 1. Convert callback buffer to stream format
5003 // 2. Convert callback buffer to stream sample rate and channel count
5004 // 3. Push callback buffer into outputBuffer
5005
5006 if ( renderAudioClient && callbackPulled ) {
5007 if ( stream_.doConvertBuffer[OUTPUT] ) {
5008 // Convert callback buffer to stream format
5009 convertBuffer( stream_.deviceBuffer,
5010 stream_.userBuffer[OUTPUT],
5011 stream_.convertInfo[OUTPUT] );
5012
5013 }
5014
5015 // Convert callback buffer to stream sample rate
5016 convertBufferWasapi( convBuffer,
5017 stream_.deviceBuffer,
5018 stream_.nDeviceChannels[OUTPUT],
5019 stream_.sampleRate,
5020 renderFormat->nSamplesPerSec,
5021 stream_.bufferSize,
5022 convBufferSize,
5023 stream_.deviceFormat[OUTPUT] );
5024
5025 // Push callback buffer into outputBuffer
5026 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5027 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5028 stream_.deviceFormat[OUTPUT] );
5029 }
5030 else {
5031 // if there is no render stream, set callbackPushed flag
5032 callbackPushed = true;
5033 }
5034
5035 // Stream Capture
5036 // ==============
5037 // 1. Get capture buffer from stream
5038 // 2. Push capture buffer into inputBuffer
5039 // 3. If 2. was successful: Release capture buffer
5040
5041 if ( captureAudioClient ) {
5042 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5043 if ( !callbackPulled ) {
5044 WaitForSingleObject( captureEvent, INFINITE );
5045 }
5046
5047 // Get capture buffer from stream
5048 hr = captureClient->GetBuffer( &streamBuffer,
5049 &bufferFrameCount,
5050 &captureFlags, NULL, NULL );
5051 if ( FAILED( hr ) ) {
5052 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5053 goto Exit;
5054 }
5055
5056 if ( bufferFrameCount != 0 ) {
5057 // Push capture buffer into inputBuffer
5058 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5059 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5060 stream_.deviceFormat[INPUT] ) )
5061 {
5062 // Release capture buffer
5063 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5064 if ( FAILED( hr ) ) {
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5066 goto Exit;
5067 }
5068 }
5069 else
5070 {
5071 // Inform WASAPI that capture was unsuccessful
5072 hr = captureClient->ReleaseBuffer( 0 );
5073 if ( FAILED( hr ) ) {
5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5075 goto Exit;
5076 }
5077 }
5078 }
5079 else
5080 {
5081 // Inform WASAPI that capture was unsuccessful
5082 hr = captureClient->ReleaseBuffer( 0 );
5083 if ( FAILED( hr ) ) {
5084 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5085 goto Exit;
5086 }
5087 }
5088 }
5089
5090 // Stream Render
5091 // =============
5092 // 1. Get render buffer from stream
5093 // 2. Pull next buffer from outputBuffer
5094 // 3. If 2. was successful: Fill render buffer with next buffer
5095 // Release render buffer
5096
5097 if ( renderAudioClient ) {
5098 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5099 if ( callbackPulled && !callbackPushed ) {
5100 WaitForSingleObject( renderEvent, INFINITE );
5101 }
5102
5103 // Get render buffer from stream
5104 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5105 if ( FAILED( hr ) ) {
5106 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5107 goto Exit;
5108 }
5109
5110 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5111 if ( FAILED( hr ) ) {
5112 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5113 goto Exit;
5114 }
5115
5116 bufferFrameCount -= numFramesPadding;
5117
5118 if ( bufferFrameCount != 0 ) {
5119 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5120 if ( FAILED( hr ) ) {
5121 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5122 goto Exit;
5123 }
5124
5125 // Pull next buffer from outputBuffer
5126 // Fill render buffer with next buffer
5127 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5128 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5129 stream_.deviceFormat[OUTPUT] ) )
5130 {
5131 // Release render buffer
5132 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5133 if ( FAILED( hr ) ) {
5134 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5135 goto Exit;
5136 }
5137 }
5138 else
5139 {
5140 // Inform WASAPI that render was unsuccessful
5141 hr = renderClient->ReleaseBuffer( 0, 0 );
5142 if ( FAILED( hr ) ) {
5143 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5144 goto Exit;
5145 }
5146 }
5147 }
5148 else
5149 {
5150 // Inform WASAPI that render was unsuccessful
5151 hr = renderClient->ReleaseBuffer( 0, 0 );
5152 if ( FAILED( hr ) ) {
5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5154 goto Exit;
5155 }
5156 }
5157 }
5158
5159 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5160 if ( callbackPushed ) {
5161 callbackPulled = false;
5162 // tick stream time
5163 RtApi::tickStreamTime();
5164 }
5165
5166 }
5167
5168 Exit:
5169 // clean up
5170 CoTaskMemFree( captureFormat );
5171 CoTaskMemFree( renderFormat );
5172
5173 free ( convBuffer );
5174
5175 CoUninitialize();
5176
5177 // update stream state
5178 stream_.state = STREAM_STOPPED;
5179
5180 if ( errorText_.empty() )
5181 return;
5182 else
5183 error( errorType );
5184 }
5185
5186 //******************** End of __WINDOWS_WASAPI__ *********************//
5187 #endif
5188
5189
5190 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5191
5192 // Modified by Robin Davies, October 2005
5193 // - Improvements to DirectX pointer chasing.
5194 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5195 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5196 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5197 // Changed device query structure for RtAudio 4.0.7, January 2010
5198
5199 #include <dsound.h>
5200 #include <assert.h>
5201 #include <algorithm>
5202
5203 #if defined(__MINGW32__)
5204 // missing from latest mingw winapi
5205 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5206 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5207 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5208 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5209 #endif
5210
5211 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5212
5213 #ifdef _MSC_VER // if Microsoft Visual C++
5214 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5215 #endif
5216
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5217 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5218 {
5219 if ( pointer > bufferSize ) pointer -= bufferSize;
5220 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5221 if ( pointer < earlierPointer ) pointer += bufferSize;
5222 return pointer >= earlierPointer && pointer < laterPointer;
5223 }
5224
5225 // A structure to hold various information related to the DirectSound
5226 // API implementation.
5227 struct DsHandle {
5228 unsigned int drainCounter; // Tracks callback counts when draining
5229 bool internalDrain; // Indicates if stop is initiated from callback or not.
5230 void *id[2];
5231 void *buffer[2];
5232 bool xrun[2];
5233 UINT bufferPointer[2];
5234 DWORD dsBufferSize[2];
5235 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5236 HANDLE condition;
5237
DsHandleDsHandle5238 DsHandle()
5239 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5240 };
5241
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.

// Enumeration callback handed to DirectSound(Capture)Enumerate; lpContext
// carries a DsProbeData pointer identifying the direction and device list.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR module,
                                          LPVOID lpContext );

// Maps a DirectSound HRESULT error code to a human-readable string.
static const char* getErrorString( int code );

// Thread routine servicing the user audio callback (Windows thread
// entry signature: unsigned __stdcall, void* argument).
static unsigned __stdcall callbackHandler( void *ptr );
5252
5253 struct DsDevice {
5254 LPGUID id[2];
5255 bool validId[2];
5256 bool found;
5257 std::string name;
5258
DsDeviceDsDevice5259 DsDevice()
5260 : found(false) { validId[0] = false; validId[1] = false; }
5261 };
5262
// Context passed (as LPVOID) to deviceQueryCallback during device
// enumeration: identifies the direction being probed and the device
// list to populate.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // list appended/updated by the callback
};
5267
RtApiDs()5268 RtApiDs :: RtApiDs()
5269 {
5270 // Dsound will run both-threaded. If CoInitialize fails, then just
5271 // accept whatever the mainline chose for a threading model.
5272 coInitialized_ = false;
5273 HRESULT hr = CoInitialize( NULL );
5274 if ( !FAILED( hr ) ) coInitialized_ = true;
5275 }
5276
~RtApiDs()5277 RtApiDs :: ~RtApiDs()
5278 {
5279 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5280 if ( stream_.state != STREAM_CLOSED ) closeStream();
5281 }
5282
// The DirectSound default output is always the first device.
// (DirectSoundEnumerate lists the primary/default render device first,
// so index 0 is the default by construction.)
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0;
}
5288
// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
// (Mirrors getDefaultOutputDevice: index 0 is the default by construction.)
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0;
}
5295
getDeviceCount(void)5296 unsigned int RtApiDs :: getDeviceCount( void )
5297 {
5298 // Set query flag for previously found devices to false, so that we
5299 // can check for any devices that have disappeared.
5300 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5301 dsDevices[i].found = false;
5302
5303 // Query DirectSound devices.
5304 struct DsProbeData probeInfo;
5305 probeInfo.isInput = false;
5306 probeInfo.dsDevices = &dsDevices;
5307 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5308 if ( FAILED( result ) ) {
5309 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5310 errorText_ = errorStream_.str();
5311 error( RtAudioError::WARNING );
5312 }
5313
5314 // Query DirectSoundCapture devices.
5315 probeInfo.isInput = true;
5316 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5317 if ( FAILED( result ) ) {
5318 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5319 errorText_ = errorStream_.str();
5320 error( RtAudioError::WARNING );
5321 }
5322
5323 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5324 for ( unsigned int i=0; i<dsDevices.size(); ) {
5325 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5326 else i++;
5327 }
5328
5329 return static_cast<unsigned int>(dsDevices.size());
5330 }
5331
// Probe the given DirectSound device index and return its capabilities:
// output/input channel counts, supported sample rates, native sample
// formats, and default-device flags.  info.probed remains false unless
// at least the input side is successfully probed (or the device is
// output-only and its probe succeeds).  Failures along the way are
// reported as warnings and probing falls through to the input side.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // Skip the output probe entirely when this device exposes no render GUID.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.  Keep every table rate inside the
  // device's secondary-buffer range, and prefer the highest rate <= 48 kHz.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // Output-only device: the probe is complete at this point.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.  The dwFormats bitmask
  // encodes (rate x channels x width) combinations; decode formats
  // first, then collect the rates available for the chosen width.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5510
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5511 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5512 unsigned int firstChannel, unsigned int sampleRate,
5513 RtAudioFormat format, unsigned int *bufferSize,
5514 RtAudio::StreamOptions *options )
5515 {
5516 if ( channels + firstChannel > 2 ) {
5517 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5518 return FAILURE;
5519 }
5520
5521 size_t nDevices = dsDevices.size();
5522 if ( nDevices == 0 ) {
5523 // This should not happen because a check is made before this function is called.
5524 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5525 return FAILURE;
5526 }
5527
5528 if ( device >= nDevices ) {
5529 // This should not happen because a check is made before this function is called.
5530 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5531 return FAILURE;
5532 }
5533
5534 if ( mode == OUTPUT ) {
5535 if ( dsDevices[ device ].validId[0] == false ) {
5536 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5537 errorText_ = errorStream_.str();
5538 return FAILURE;
5539 }
5540 }
5541 else { // mode == INPUT
5542 if ( dsDevices[ device ].validId[1] == false ) {
5543 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5544 errorText_ = errorStream_.str();
5545 return FAILURE;
5546 }
5547 }
5548
5549 // According to a note in PortAudio, using GetDesktopWindow()
5550 // instead of GetForegroundWindow() is supposed to avoid problems
5551 // that occur when the application's window is not the foreground
5552 // window. Also, if the application window closes before the
5553 // DirectSound buffer, DirectSound can crash. In the past, I had
5554 // problems when using GetDesktopWindow() but it seems fine now
5555 // (January 2010). I'll leave it commented here.
5556 // HWND hWnd = GetForegroundWindow();
5557 HWND hWnd = GetDesktopWindow();
5558
5559 // Check the numberOfBuffers parameter and limit the lowest value to
5560 // two. This is a judgement call and a value of two is probably too
5561 // low for capture, but it should work for playback.
5562 int nBuffers = 0;
5563 if ( options ) nBuffers = options->numberOfBuffers;
5564 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5565 if ( nBuffers < 2 ) nBuffers = 3;
5566
5567 // Check the lower range of the user-specified buffer size and set
5568 // (arbitrarily) to a lower bound of 32.
5569 if ( *bufferSize < 32 ) *bufferSize = 32;
5570
5571 // Create the wave format structure. The data format setting will
5572 // be determined later.
5573 WAVEFORMATEX waveFormat;
5574 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5575 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5576 waveFormat.nChannels = channels + firstChannel;
5577 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5578
5579 // Determine the device buffer size. By default, we'll use the value
5580 // defined above (32K), but we will grow it to make allowances for
5581 // very large software buffer sizes.
5582 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5583 DWORD dsPointerLeadTime = 0;
5584
5585 void *ohandle = 0, *bhandle = 0;
5586 HRESULT result;
5587 if ( mode == OUTPUT ) {
5588
5589 LPDIRECTSOUND output;
5590 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5591 if ( FAILED( result ) ) {
5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5593 errorText_ = errorStream_.str();
5594 return FAILURE;
5595 }
5596
5597 DSCAPS outCaps;
5598 outCaps.dwSize = sizeof( outCaps );
5599 result = output->GetCaps( &outCaps );
5600 if ( FAILED( result ) ) {
5601 output->Release();
5602 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5603 errorText_ = errorStream_.str();
5604 return FAILURE;
5605 }
5606
5607 // Check channel information.
5608 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5609 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5610 errorText_ = errorStream_.str();
5611 return FAILURE;
5612 }
5613
5614 // Check format information. Use 16-bit format unless not
5615 // supported or user requests 8-bit.
5616 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5617 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5618 waveFormat.wBitsPerSample = 16;
5619 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5620 }
5621 else {
5622 waveFormat.wBitsPerSample = 8;
5623 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5624 }
5625 stream_.userFormat = format;
5626
5627 // Update wave format structure and buffer information.
5628 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5629 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5630 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5631
5632 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5633 while ( dsPointerLeadTime * 2U > dsBufferSize )
5634 dsBufferSize *= 2;
5635
5636 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5637 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5638 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5639 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5640 if ( FAILED( result ) ) {
5641 output->Release();
5642 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5643 errorText_ = errorStream_.str();
5644 return FAILURE;
5645 }
5646
5647 // Even though we will write to the secondary buffer, we need to
5648 // access the primary buffer to set the correct output format
5649 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5650 // buffer description.
5651 DSBUFFERDESC bufferDescription;
5652 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5653 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5654 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5655
5656 // Obtain the primary buffer
5657 LPDIRECTSOUNDBUFFER buffer;
5658 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5659 if ( FAILED( result ) ) {
5660 output->Release();
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5662 errorText_ = errorStream_.str();
5663 return FAILURE;
5664 }
5665
5666 // Set the primary DS buffer sound format.
5667 result = buffer->SetFormat( &waveFormat );
5668 if ( FAILED( result ) ) {
5669 output->Release();
5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5671 errorText_ = errorStream_.str();
5672 return FAILURE;
5673 }
5674
5675 // Setup the secondary DS buffer description.
5676 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5677 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5678 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5679 DSBCAPS_GLOBALFOCUS |
5680 DSBCAPS_GETCURRENTPOSITION2 |
5681 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5682 bufferDescription.dwBufferBytes = dsBufferSize;
5683 bufferDescription.lpwfxFormat = &waveFormat;
5684
5685 // Try to create the secondary DS buffer. If that doesn't work,
5686 // try to use software mixing. Otherwise, there's a problem.
5687 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5688 if ( FAILED( result ) ) {
5689 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5690 DSBCAPS_GLOBALFOCUS |
5691 DSBCAPS_GETCURRENTPOSITION2 |
5692 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5693 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5694 if ( FAILED( result ) ) {
5695 output->Release();
5696 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5697 errorText_ = errorStream_.str();
5698 return FAILURE;
5699 }
5700 }
5701
5702 // Get the buffer size ... might be different from what we specified.
5703 DSBCAPS dsbcaps;
5704 dsbcaps.dwSize = sizeof( DSBCAPS );
5705 result = buffer->GetCaps( &dsbcaps );
5706 if ( FAILED( result ) ) {
5707 output->Release();
5708 buffer->Release();
5709 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5710 errorText_ = errorStream_.str();
5711 return FAILURE;
5712 }
5713
5714 dsBufferSize = dsbcaps.dwBufferBytes;
5715
5716 // Lock the DS buffer
5717 LPVOID audioPtr;
5718 DWORD dataLen;
5719 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5720 if ( FAILED( result ) ) {
5721 output->Release();
5722 buffer->Release();
5723 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5724 errorText_ = errorStream_.str();
5725 return FAILURE;
5726 }
5727
5728 // Zero the DS buffer
5729 ZeroMemory( audioPtr, dataLen );
5730
5731 // Unlock the DS buffer
5732 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5733 if ( FAILED( result ) ) {
5734 output->Release();
5735 buffer->Release();
5736 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5737 errorText_ = errorStream_.str();
5738 return FAILURE;
5739 }
5740
5741 ohandle = (void *) output;
5742 bhandle = (void *) buffer;
5743 }
5744
5745 if ( mode == INPUT ) {
5746
5747 LPDIRECTSOUNDCAPTURE input;
5748 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5749 if ( FAILED( result ) ) {
5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5751 errorText_ = errorStream_.str();
5752 return FAILURE;
5753 }
5754
5755 DSCCAPS inCaps;
5756 inCaps.dwSize = sizeof( inCaps );
5757 result = input->GetCaps( &inCaps );
5758 if ( FAILED( result ) ) {
5759 input->Release();
5760 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5761 errorText_ = errorStream_.str();
5762 return FAILURE;
5763 }
5764
5765 // Check channel information.
5766 if ( inCaps.dwChannels < channels + firstChannel ) {
5767 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5768 return FAILURE;
5769 }
5770
5771 // Check format information. Use 16-bit format unless user
5772 // requests 8-bit.
5773 DWORD deviceFormats;
5774 if ( channels + firstChannel == 2 ) {
5775 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5776 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5777 waveFormat.wBitsPerSample = 8;
5778 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5779 }
5780 else { // assume 16-bit is supported
5781 waveFormat.wBitsPerSample = 16;
5782 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5783 }
5784 }
5785 else { // channel == 1
5786 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5787 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5788 waveFormat.wBitsPerSample = 8;
5789 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5790 }
5791 else { // assume 16-bit is supported
5792 waveFormat.wBitsPerSample = 16;
5793 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5794 }
5795 }
5796 stream_.userFormat = format;
5797
5798 // Update wave format structure and buffer information.
5799 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5800 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5801 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5802
5803 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5804 while ( dsPointerLeadTime * 2U > dsBufferSize )
5805 dsBufferSize *= 2;
5806
5807 // Setup the secondary DS buffer description.
5808 DSCBUFFERDESC bufferDescription;
5809 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5810 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5811 bufferDescription.dwFlags = 0;
5812 bufferDescription.dwReserved = 0;
5813 bufferDescription.dwBufferBytes = dsBufferSize;
5814 bufferDescription.lpwfxFormat = &waveFormat;
5815
5816 // Create the capture buffer.
5817 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5818 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5819 if ( FAILED( result ) ) {
5820 input->Release();
5821 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5822 errorText_ = errorStream_.str();
5823 return FAILURE;
5824 }
5825
5826 // Get the buffer size ... might be different from what we specified.
5827 DSCBCAPS dscbcaps;
5828 dscbcaps.dwSize = sizeof( DSCBCAPS );
5829 result = buffer->GetCaps( &dscbcaps );
5830 if ( FAILED( result ) ) {
5831 input->Release();
5832 buffer->Release();
5833 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5834 errorText_ = errorStream_.str();
5835 return FAILURE;
5836 }
5837
5838 dsBufferSize = dscbcaps.dwBufferBytes;
5839
5840 // NOTE: We could have a problem here if this is a duplex stream
5841 // and the play and capture hardware buffer sizes are different
5842 // (I'm actually not sure if that is a problem or not).
5843 // Currently, we are not verifying that.
5844
5845 // Lock the capture buffer
5846 LPVOID audioPtr;
5847 DWORD dataLen;
5848 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5849 if ( FAILED( result ) ) {
5850 input->Release();
5851 buffer->Release();
5852 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5853 errorText_ = errorStream_.str();
5854 return FAILURE;
5855 }
5856
5857 // Zero the buffer
5858 ZeroMemory( audioPtr, dataLen );
5859
5860 // Unlock the buffer
5861 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5862 if ( FAILED( result ) ) {
5863 input->Release();
5864 buffer->Release();
5865 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5866 errorText_ = errorStream_.str();
5867 return FAILURE;
5868 }
5869
5870 ohandle = (void *) input;
5871 bhandle = (void *) buffer;
5872 }
5873
5874 // Set various stream parameters
5875 DsHandle *handle = 0;
5876 stream_.nDeviceChannels[mode] = channels + firstChannel;
5877 stream_.nUserChannels[mode] = channels;
5878 stream_.bufferSize = *bufferSize;
5879 stream_.channelOffset[mode] = firstChannel;
5880 stream_.deviceInterleaved[mode] = true;
5881 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5882 else stream_.userInterleaved = true;
5883
5884 // Set flag for buffer conversion
5885 stream_.doConvertBuffer[mode] = false;
5886 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5887 stream_.doConvertBuffer[mode] = true;
5888 if (stream_.userFormat != stream_.deviceFormat[mode])
5889 stream_.doConvertBuffer[mode] = true;
5890 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5891 stream_.nUserChannels[mode] > 1 )
5892 stream_.doConvertBuffer[mode] = true;
5893
5894 // Allocate necessary internal buffers
5895 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5896 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5897 if ( stream_.userBuffer[mode] == NULL ) {
5898 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5899 goto error;
5900 }
5901
5902 if ( stream_.doConvertBuffer[mode] ) {
5903
5904 bool makeBuffer = true;
5905 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5906 if ( mode == INPUT ) {
5907 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5908 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5909 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5910 }
5911 }
5912
5913 if ( makeBuffer ) {
5914 bufferBytes *= *bufferSize;
5915 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5916 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5917 if ( stream_.deviceBuffer == NULL ) {
5918 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5919 goto error;
5920 }
5921 }
5922 }
5923
5924 // Allocate our DsHandle structures for the stream.
5925 if ( stream_.apiHandle == 0 ) {
5926 try {
5927 handle = new DsHandle;
5928 }
5929 catch ( std::bad_alloc& ) {
5930 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5931 goto error;
5932 }
5933
5934 // Create a manual-reset event.
5935 handle->condition = CreateEvent( NULL, // no security
5936 TRUE, // manual-reset
5937 FALSE, // non-signaled initially
5938 NULL ); // unnamed
5939 stream_.apiHandle = (void *) handle;
5940 }
5941 else
5942 handle = (DsHandle *) stream_.apiHandle;
5943 handle->id[mode] = ohandle;
5944 handle->buffer[mode] = bhandle;
5945 handle->dsBufferSize[mode] = dsBufferSize;
5946 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5947
5948 stream_.device[mode] = device;
5949 stream_.state = STREAM_STOPPED;
5950 if ( stream_.mode == OUTPUT && mode == INPUT )
5951 // We had already set up an output stream.
5952 stream_.mode = DUPLEX;
5953 else
5954 stream_.mode = mode;
5955 stream_.nBuffers = nBuffers;
5956 stream_.sampleRate = sampleRate;
5957
5958 // Setup the buffer conversion information structure.
5959 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5960
5961 // Setup the callback thread.
5962 if ( stream_.callbackInfo.isRunning == false ) {
5963 unsigned threadId;
5964 stream_.callbackInfo.isRunning = true;
5965 stream_.callbackInfo.object = (void *) this;
5966 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5967 &stream_.callbackInfo, 0, &threadId );
5968 if ( stream_.callbackInfo.thread == 0 ) {
5969 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5970 goto error;
5971 }
5972
5973 // Boost DS thread priority
5974 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5975 }
5976 return SUCCESS;
5977
5978 error:
5979 if ( handle ) {
5980 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5981 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5982 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5983 if ( buffer ) buffer->Release();
5984 object->Release();
5985 }
5986 if ( handle->buffer[1] ) {
5987 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5988 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5989 if ( buffer ) buffer->Release();
5990 object->Release();
5991 }
5992 CloseHandle( handle->condition );
5993 delete handle;
5994 stream_.apiHandle = 0;
5995 }
5996
5997 for ( int i=0; i<2; i++ ) {
5998 if ( stream_.userBuffer[i] ) {
5999 free( stream_.userBuffer[i] );
6000 stream_.userBuffer[i] = 0;
6001 }
6002 }
6003
6004 if ( stream_.deviceBuffer ) {
6005 free( stream_.deviceBuffer );
6006 stream_.deviceBuffer = 0;
6007 }
6008
6009 stream_.state = STREAM_CLOSED;
6010 return FAILURE;
6011 }
6012
// Close the stream and release all DirectSound and internal resources.
// Order matters here: the callback thread must exit before the buffers
// it uses are released, and playing/capturing buffers must be stopped
// before Release().
void RtApiDs :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiDs::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // Stop the callback thread.  Clearing isRunning makes callbackHandler()
  // fall out of its loop; we then wait for the thread to terminate before
  // tearing anything else down.
  stream_.callbackInfo.isRunning = false;
  WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
  CloseHandle( (HANDLE) stream_.callbackInfo.thread );

  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( handle ) {
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
      // Output side: stop playback, then release the secondary buffer
      // and the owning DirectSound object.
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
      if ( buffer ) {
        buffer->Stop();
        buffer->Release();
      }
      object->Release();
    }
    if ( handle->buffer[1] ) {
      // Input side: stop capturing, then release the capture buffer and
      // the owning DirectSoundCapture object.
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
      if ( buffer ) {
        buffer->Stop();
        buffer->Release();
      }
      object->Release();
    }
    // The condition event was created in probeDeviceOpen().
    CloseHandle( handle->condition );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free the internal user conversion buffers (one per direction).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the shared device-format conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
6066
startStream()6067 void RtApiDs :: startStream()
6068 {
6069 verifyStream();
6070 if ( stream_.state == STREAM_RUNNING ) {
6071 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6072 error( RtAudioError::WARNING );
6073 return;
6074 }
6075
6076 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6077
6078 // Increase scheduler frequency on lesser windows (a side-effect of
6079 // increasing timer accuracy). On greater windows (Win2K or later),
6080 // this is already in effect.
6081 timeBeginPeriod( 1 );
6082
6083 buffersRolling = false;
6084 duplexPrerollBytes = 0;
6085
6086 if ( stream_.mode == DUPLEX ) {
6087 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6088 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6089 }
6090
6091 HRESULT result = 0;
6092 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6093
6094 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6095 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6096 if ( FAILED( result ) ) {
6097 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6098 errorText_ = errorStream_.str();
6099 goto unlock;
6100 }
6101 }
6102
6103 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6104
6105 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6106 result = buffer->Start( DSCBSTART_LOOPING );
6107 if ( FAILED( result ) ) {
6108 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6109 errorText_ = errorStream_.str();
6110 goto unlock;
6111 }
6112 }
6113
6114 handle->drainCounter = 0;
6115 handle->internalDrain = false;
6116 ResetEvent( handle->condition );
6117 stream_.state = STREAM_RUNNING;
6118
6119 unlock:
6120 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6121 }
6122
// Stop the stream.  If output is active and no drain is in progress, wait
// for the callback thread to finish playing queued data, then stop and
// zero the DirectSound buffers so a restart doesn't replay stale audio.
// NOTE: the mutex is acquired in exactly one of the branches below
// (output branch for OUTPUT/DUPLEX, input branch for INPUT-only) and
// released once at the `unlock` label.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      // Ask the callback thread to drain (2 = output silence from now on)
      // and wait for it to signal completion via the condition event.
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    // Keep the callback thread from touching the buffers while we stop
    // and clear them.
    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already taken in the output branch
    // above; only lock here for an INPUT-only stream.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6226
abortStream()6227 void RtApiDs :: abortStream()
6228 {
6229 verifyStream();
6230 if ( stream_.state == STREAM_STOPPED ) {
6231 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6232 error( RtAudioError::WARNING );
6233 return;
6234 }
6235
6236 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6237 handle->drainCounter = 2;
6238
6239 stopStream();
6240 }
6241
callbackEvent()6242 void RtApiDs :: callbackEvent()
6243 {
6244 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6245 Sleep( 50 ); // sleep 50 milliseconds
6246 return;
6247 }
6248
6249 if ( stream_.state == STREAM_CLOSED ) {
6250 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6251 error( RtAudioError::WARNING );
6252 return;
6253 }
6254
6255 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6256 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6257
6258 // Check if we were draining the stream and signal is finished.
6259 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6260
6261 stream_.state = STREAM_STOPPING;
6262 if ( handle->internalDrain == false )
6263 SetEvent( handle->condition );
6264 else
6265 stopStream();
6266 return;
6267 }
6268
6269 // Invoke user callback to get fresh output data UNLESS we are
6270 // draining stream.
6271 if ( handle->drainCounter == 0 ) {
6272 RtAudioCallback callback = (RtAudioCallback) info->callback;
6273 double streamTime = getStreamTime();
6274 RtAudioStreamStatus status = 0;
6275 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6276 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6277 handle->xrun[0] = false;
6278 }
6279 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6280 status |= RTAUDIO_INPUT_OVERFLOW;
6281 handle->xrun[1] = false;
6282 }
6283 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6284 stream_.bufferSize, streamTime, status, info->userData );
6285 if ( cbReturnValue == 2 ) {
6286 stream_.state = STREAM_STOPPING;
6287 handle->drainCounter = 2;
6288 abortStream();
6289 return;
6290 }
6291 else if ( cbReturnValue == 1 ) {
6292 handle->drainCounter = 1;
6293 handle->internalDrain = true;
6294 }
6295 }
6296
6297 HRESULT result;
6298 DWORD currentWritePointer, safeWritePointer;
6299 DWORD currentReadPointer, safeReadPointer;
6300 UINT nextWritePointer;
6301
6302 LPVOID buffer1 = NULL;
6303 LPVOID buffer2 = NULL;
6304 DWORD bufferSize1 = 0;
6305 DWORD bufferSize2 = 0;
6306
6307 char *buffer;
6308 long bufferBytes;
6309
6310 MUTEX_LOCK( &stream_.mutex );
6311 if ( stream_.state == STREAM_STOPPED ) {
6312 MUTEX_UNLOCK( &stream_.mutex );
6313 return;
6314 }
6315
6316 if ( buffersRolling == false ) {
6317 if ( stream_.mode == DUPLEX ) {
6318 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6319
6320 // It takes a while for the devices to get rolling. As a result,
6321 // there's no guarantee that the capture and write device pointers
6322 // will move in lockstep. Wait here for both devices to start
6323 // rolling, and then set our buffer pointers accordingly.
6324 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6325 // bytes later than the write buffer.
6326
6327 // Stub: a serious risk of having a pre-emptive scheduling round
6328 // take place between the two GetCurrentPosition calls... but I'm
6329 // really not sure how to solve the problem. Temporarily boost to
6330 // Realtime priority, maybe; but I'm not sure what priority the
6331 // DirectSound service threads run at. We *should* be roughly
6332 // within a ms or so of correct.
6333
6334 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6335 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6336
6337 DWORD startSafeWritePointer, startSafeReadPointer;
6338
6339 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6340 if ( FAILED( result ) ) {
6341 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6342 errorText_ = errorStream_.str();
6343 MUTEX_UNLOCK( &stream_.mutex );
6344 error( RtAudioError::SYSTEM_ERROR );
6345 return;
6346 }
6347 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6348 if ( FAILED( result ) ) {
6349 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6350 errorText_ = errorStream_.str();
6351 MUTEX_UNLOCK( &stream_.mutex );
6352 error( RtAudioError::SYSTEM_ERROR );
6353 return;
6354 }
6355 while ( true ) {
6356 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6357 if ( FAILED( result ) ) {
6358 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6359 errorText_ = errorStream_.str();
6360 MUTEX_UNLOCK( &stream_.mutex );
6361 error( RtAudioError::SYSTEM_ERROR );
6362 return;
6363 }
6364 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6365 if ( FAILED( result ) ) {
6366 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6367 errorText_ = errorStream_.str();
6368 MUTEX_UNLOCK( &stream_.mutex );
6369 error( RtAudioError::SYSTEM_ERROR );
6370 return;
6371 }
6372 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6373 Sleep( 1 );
6374 }
6375
6376 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6377
6378 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6379 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6380 handle->bufferPointer[1] = safeReadPointer;
6381 }
6382 else if ( stream_.mode == OUTPUT ) {
6383
6384 // Set the proper nextWritePosition after initial startup.
6385 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6386 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6387 if ( FAILED( result ) ) {
6388 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6389 errorText_ = errorStream_.str();
6390 MUTEX_UNLOCK( &stream_.mutex );
6391 error( RtAudioError::SYSTEM_ERROR );
6392 return;
6393 }
6394 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6395 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6396 }
6397
6398 buffersRolling = true;
6399 }
6400
6401 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6402
6403 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6404
6405 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6406 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6407 bufferBytes *= formatBytes( stream_.userFormat );
6408 memset( stream_.userBuffer[0], 0, bufferBytes );
6409 }
6410
6411 // Setup parameters and do buffer conversion if necessary.
6412 if ( stream_.doConvertBuffer[0] ) {
6413 buffer = stream_.deviceBuffer;
6414 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6415 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6416 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6417 }
6418 else {
6419 buffer = stream_.userBuffer[0];
6420 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6421 bufferBytes *= formatBytes( stream_.userFormat );
6422 }
6423
6424 // No byte swapping necessary in DirectSound implementation.
6425
6426 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6427 // unsigned. So, we need to convert our signed 8-bit data here to
6428 // unsigned.
6429 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6430 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6431
6432 DWORD dsBufferSize = handle->dsBufferSize[0];
6433 nextWritePointer = handle->bufferPointer[0];
6434
6435 DWORD endWrite, leadPointer;
6436 while ( true ) {
6437 // Find out where the read and "safe write" pointers are.
6438 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6439 if ( FAILED( result ) ) {
6440 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6441 errorText_ = errorStream_.str();
6442 MUTEX_UNLOCK( &stream_.mutex );
6443 error( RtAudioError::SYSTEM_ERROR );
6444 return;
6445 }
6446
6447 // We will copy our output buffer into the region between
6448 // safeWritePointer and leadPointer. If leadPointer is not
6449 // beyond the next endWrite position, wait until it is.
6450 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6451 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6452 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6453 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6454 endWrite = nextWritePointer + bufferBytes;
6455
6456 // Check whether the entire write region is behind the play pointer.
6457 if ( leadPointer >= endWrite ) break;
6458
6459 // If we are here, then we must wait until the leadPointer advances
6460 // beyond the end of our next write region. We use the
6461 // Sleep() function to suspend operation until that happens.
6462 double millis = ( endWrite - leadPointer ) * 1000.0;
6463 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6464 if ( millis < 1.0 ) millis = 1.0;
6465 Sleep( (DWORD) millis );
6466 }
6467
6468 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6469 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6470 // We've strayed into the forbidden zone ... resync the read pointer.
6471 handle->xrun[0] = true;
6472 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6473 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6474 handle->bufferPointer[0] = nextWritePointer;
6475 endWrite = nextWritePointer + bufferBytes;
6476 }
6477
6478 // Lock free space in the buffer
6479 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6480 &bufferSize1, &buffer2, &bufferSize2, 0 );
6481 if ( FAILED( result ) ) {
6482 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6483 errorText_ = errorStream_.str();
6484 MUTEX_UNLOCK( &stream_.mutex );
6485 error( RtAudioError::SYSTEM_ERROR );
6486 return;
6487 }
6488
6489 // Copy our buffer into the DS buffer
6490 CopyMemory( buffer1, buffer, bufferSize1 );
6491 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6492
6493 // Update our buffer offset and unlock sound buffer
6494 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6495 if ( FAILED( result ) ) {
6496 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6497 errorText_ = errorStream_.str();
6498 MUTEX_UNLOCK( &stream_.mutex );
6499 error( RtAudioError::SYSTEM_ERROR );
6500 return;
6501 }
6502 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6503 handle->bufferPointer[0] = nextWritePointer;
6504 }
6505
6506 // Don't bother draining input
6507 if ( handle->drainCounter ) {
6508 handle->drainCounter++;
6509 goto unlock;
6510 }
6511
6512 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6513
6514 // Setup parameters.
6515 if ( stream_.doConvertBuffer[1] ) {
6516 buffer = stream_.deviceBuffer;
6517 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6518 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6519 }
6520 else {
6521 buffer = stream_.userBuffer[1];
6522 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6523 bufferBytes *= formatBytes( stream_.userFormat );
6524 }
6525
6526 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6527 long nextReadPointer = handle->bufferPointer[1];
6528 DWORD dsBufferSize = handle->dsBufferSize[1];
6529
6530 // Find out where the write and "safe read" pointers are.
6531 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6532 if ( FAILED( result ) ) {
6533 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6534 errorText_ = errorStream_.str();
6535 MUTEX_UNLOCK( &stream_.mutex );
6536 error( RtAudioError::SYSTEM_ERROR );
6537 return;
6538 }
6539
6540 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6541 DWORD endRead = nextReadPointer + bufferBytes;
6542
6543 // Handling depends on whether we are INPUT or DUPLEX.
6544 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6545 // then a wait here will drag the write pointers into the forbidden zone.
6546 //
6547 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6548 // it's in a safe position. This causes dropouts, but it seems to be the only
6549 // practical way to sync up the read and write pointers reliably, given the
6550 // the very complex relationship between phase and increment of the read and write
6551 // pointers.
6552 //
6553 // In order to minimize audible dropouts in DUPLEX mode, we will
6554 // provide a pre-roll period of 0.5 seconds in which we return
6555 // zeros from the read buffer while the pointers sync up.
6556
6557 if ( stream_.mode == DUPLEX ) {
6558 if ( safeReadPointer < endRead ) {
6559 if ( duplexPrerollBytes <= 0 ) {
6560 // Pre-roll time over. Be more aggressive.
6561 int adjustment = endRead-safeReadPointer;
6562
6563 handle->xrun[1] = true;
6564 // Two cases:
6565 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6566 // and perform fine adjustments later.
6567 // - small adjustments: back off by twice as much.
6568 if ( adjustment >= 2*bufferBytes )
6569 nextReadPointer = safeReadPointer-2*bufferBytes;
6570 else
6571 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6572
6573 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6574
6575 }
6576 else {
6577 // In pre=roll time. Just do it.
6578 nextReadPointer = safeReadPointer - bufferBytes;
6579 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6580 }
6581 endRead = nextReadPointer + bufferBytes;
6582 }
6583 }
6584 else { // mode == INPUT
6585 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6586 // See comments for playback.
6587 double millis = (endRead - safeReadPointer) * 1000.0;
6588 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6589 if ( millis < 1.0 ) millis = 1.0;
6590 Sleep( (DWORD) millis );
6591
6592 // Wake up and find out where we are now.
6593 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6594 if ( FAILED( result ) ) {
6595 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6596 errorText_ = errorStream_.str();
6597 MUTEX_UNLOCK( &stream_.mutex );
6598 error( RtAudioError::SYSTEM_ERROR );
6599 return;
6600 }
6601
6602 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6603 }
6604 }
6605
6606 // Lock free space in the buffer
6607 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6608 &bufferSize1, &buffer2, &bufferSize2, 0 );
6609 if ( FAILED( result ) ) {
6610 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6611 errorText_ = errorStream_.str();
6612 MUTEX_UNLOCK( &stream_.mutex );
6613 error( RtAudioError::SYSTEM_ERROR );
6614 return;
6615 }
6616
6617 if ( duplexPrerollBytes <= 0 ) {
6618 // Copy our buffer into the DS buffer
6619 CopyMemory( buffer, buffer1, bufferSize1 );
6620 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6621 }
6622 else {
6623 memset( buffer, 0, bufferSize1 );
6624 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6625 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6626 }
6627
6628 // Update our buffer offset and unlock sound buffer
6629 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6630 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6631 if ( FAILED( result ) ) {
6632 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6633 errorText_ = errorStream_.str();
6634 MUTEX_UNLOCK( &stream_.mutex );
6635 error( RtAudioError::SYSTEM_ERROR );
6636 return;
6637 }
6638 handle->bufferPointer[1] = nextReadPointer;
6639
6640 // No byte swapping necessary in DirectSound implementation.
6641
6642 // If necessary, convert 8-bit data from unsigned to signed.
6643 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6644 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6645
6646 // Do buffer conversion if necessary.
6647 if ( stream_.doConvertBuffer[1] )
6648 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6649 }
6650
6651 unlock:
6652 MUTEX_UNLOCK( &stream_.mutex );
6653 RtApi::tickStreamTime();
6654 }
6655
6656 // Definitions for utility functions and callbacks
6657 // specific to the DirectSound implementation.
6658
callbackHandler(void * ptr)6659 static unsigned __stdcall callbackHandler( void *ptr )
6660 {
6661 CallbackInfo *info = (CallbackInfo *) ptr;
6662 RtApiDs *object = (RtApiDs *) info->object;
6663 bool* isRunning = &info->isRunning;
6664
6665 while ( *isRunning == true ) {
6666 object->callbackEvent();
6667 }
6668
6669 _endthreadex( 0 );
6670 return 0;
6671 }
6672
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6673 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6674 LPCTSTR description,
6675 LPCTSTR /*module*/,
6676 LPVOID lpContext )
6677 {
6678 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6679 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6680
6681 HRESULT hr;
6682 bool validDevice = false;
6683 if ( probeInfo.isInput == true ) {
6684 DSCCAPS caps;
6685 LPDIRECTSOUNDCAPTURE object;
6686
6687 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6688 if ( hr != DS_OK ) return TRUE;
6689
6690 caps.dwSize = sizeof(caps);
6691 hr = object->GetCaps( &caps );
6692 if ( hr == DS_OK ) {
6693 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6694 validDevice = true;
6695 }
6696 object->Release();
6697 }
6698 else {
6699 DSCAPS caps;
6700 LPDIRECTSOUND object;
6701 hr = DirectSoundCreate( lpguid, &object, NULL );
6702 if ( hr != DS_OK ) return TRUE;
6703
6704 caps.dwSize = sizeof(caps);
6705 hr = object->GetCaps( &caps );
6706 if ( hr == DS_OK ) {
6707 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6708 validDevice = true;
6709 }
6710 object->Release();
6711 }
6712
6713 // If good device, then save its name and guid.
6714 std::string name = convertCharPointerToStdString( description );
6715 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6716 if ( lpguid == NULL )
6717 name = "Default Device";
6718 if ( validDevice ) {
6719 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6720 if ( dsDevices[i].name == name ) {
6721 dsDevices[i].found = true;
6722 if ( probeInfo.isInput ) {
6723 dsDevices[i].id[1] = lpguid;
6724 dsDevices[i].validId[1] = true;
6725 }
6726 else {
6727 dsDevices[i].id[0] = lpguid;
6728 dsDevices[i].validId[0] = true;
6729 }
6730 return TRUE;
6731 }
6732 }
6733
6734 DsDevice device;
6735 device.name = name;
6736 device.found = true;
6737 if ( probeInfo.isInput ) {
6738 device.id[1] = lpguid;
6739 device.validId[1] = true;
6740 }
6741 else {
6742 device.id[0] = lpguid;
6743 device.validId[0] = true;
6744 }
6745 dsDevices.push_back( device );
6746 }
6747
6748 return TRUE;
6749 }
6750
getErrorString(int code)6751 static const char* getErrorString( int code )
6752 {
6753 switch ( code ) {
6754
6755 case DSERR_ALLOCATED:
6756 return "Already allocated";
6757
6758 case DSERR_CONTROLUNAVAIL:
6759 return "Control unavailable";
6760
6761 case DSERR_INVALIDPARAM:
6762 return "Invalid parameter";
6763
6764 case DSERR_INVALIDCALL:
6765 return "Invalid call";
6766
6767 case DSERR_GENERIC:
6768 return "Generic error";
6769
6770 case DSERR_PRIOLEVELNEEDED:
6771 return "Priority level needed";
6772
6773 case DSERR_OUTOFMEMORY:
6774 return "Out of memory";
6775
6776 case DSERR_BADFORMAT:
6777 return "The sample rate or the channel format is not supported";
6778
6779 case DSERR_UNSUPPORTED:
6780 return "Not supported";
6781
6782 case DSERR_NODRIVER:
6783 return "No driver";
6784
6785 case DSERR_ALREADYINITIALIZED:
6786 return "Already initialized";
6787
6788 case DSERR_NOAGGREGATION:
6789 return "No aggregation";
6790
6791 case DSERR_BUFFERLOST:
6792 return "Buffer lost";
6793
6794 case DSERR_OTHERAPPHASPRIO:
6795 return "Another application already has priority";
6796
6797 case DSERR_UNINITIALIZED:
6798 return "Uninitialized";
6799
6800 default:
6801 return "DirectSound unknown error";
6802 }
6803 }
6804 //******************** End of __WINDOWS_DS__ *********************//
6805 #endif
6806
6807
6808 #if defined(__LINUX_ALSA__)
6809
6810 #include <alsa/asoundlib.h>
6811 #include <unistd.h>
6812
6813 // A structure to hold various information related to the ALSA API
6814 // implementation.
6815 struct AlsaHandle {
6816 snd_pcm_t *handles[2];
6817 bool synchronized;
6818 bool xrun[2];
6819 pthread_cond_t runnable_cv;
6820 bool runnable;
6821
AlsaHandleAlsaHandle6822 AlsaHandle()
6823 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6824 };
6825
6826 static void *alsaCallbackHandler( void * ptr );
6827
RtApiAlsa()6828 RtApiAlsa :: RtApiAlsa()
6829 {
6830 // Nothing to do here.
6831 }
6832
~RtApiAlsa()6833 RtApiAlsa :: ~RtApiAlsa()
6834 {
6835 if ( stream_.state != STREAM_CLOSED ) closeStream();
6836 }
6837
getDeviceCount(void)6838 unsigned int RtApiAlsa :: getDeviceCount( void )
6839 {
6840 unsigned nDevices = 0;
6841 int result, subdevice, card;
6842 char name[64];
6843 snd_ctl_t *handle;
6844
6845 // Count cards and devices
6846 card = -1;
6847 snd_card_next( &card );
6848 while ( card >= 0 ) {
6849 sprintf( name, "hw:%d", card );
6850 result = snd_ctl_open( &handle, name, 0 );
6851 if ( result < 0 ) {
6852 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6853 errorText_ = errorStream_.str();
6854 error( RtAudioError::WARNING );
6855 goto nextcard;
6856 }
6857 subdevice = -1;
6858 while( 1 ) {
6859 result = snd_ctl_pcm_next_device( handle, &subdevice );
6860 if ( result < 0 ) {
6861 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6862 errorText_ = errorStream_.str();
6863 error( RtAudioError::WARNING );
6864 break;
6865 }
6866 if ( subdevice < 0 )
6867 break;
6868 nDevices++;
6869 }
6870 nextcard:
6871 snd_ctl_close( handle );
6872 snd_card_next( &card );
6873 }
6874
6875 result = snd_ctl_open( &handle, "default", 0 );
6876 if (result == 0) {
6877 nDevices++;
6878 snd_ctl_close( handle );
6879 }
6880
6881 return nDevices;
6882 }
6883
getDeviceInfo(unsigned int device)6884 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6885 {
6886 RtAudio::DeviceInfo info;
6887 info.probed = false;
6888
6889 unsigned nDevices = 0;
6890 int result, subdevice, card;
6891 char name[64];
6892 snd_ctl_t *chandle;
6893
6894 // Count cards and devices
6895 card = -1;
6896 subdevice = -1;
6897 snd_card_next( &card );
6898 while ( card >= 0 ) {
6899 sprintf( name, "hw:%d", card );
6900 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6901 if ( result < 0 ) {
6902 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6903 errorText_ = errorStream_.str();
6904 error( RtAudioError::WARNING );
6905 goto nextcard;
6906 }
6907 subdevice = -1;
6908 while( 1 ) {
6909 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6910 if ( result < 0 ) {
6911 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6912 errorText_ = errorStream_.str();
6913 error( RtAudioError::WARNING );
6914 break;
6915 }
6916 if ( subdevice < 0 ) break;
6917 if ( nDevices == device ) {
6918 sprintf( name, "hw:%d,%d", card, subdevice );
6919 goto foundDevice;
6920 }
6921 nDevices++;
6922 }
6923 nextcard:
6924 snd_ctl_close( chandle );
6925 snd_card_next( &card );
6926 }
6927
6928 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6929 if ( result == 0 ) {
6930 if ( nDevices == device ) {
6931 strcpy( name, "default" );
6932 goto foundDevice;
6933 }
6934 nDevices++;
6935 }
6936
6937 if ( nDevices == 0 ) {
6938 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6939 error( RtAudioError::INVALID_USE );
6940 return info;
6941 }
6942
6943 if ( device >= nDevices ) {
6944 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6945 error( RtAudioError::INVALID_USE );
6946 return info;
6947 }
6948
6949 foundDevice:
6950
6951 // If a stream is already open, we cannot probe the stream devices.
6952 // Thus, use the saved results.
6953 if ( stream_.state != STREAM_CLOSED &&
6954 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6955 snd_ctl_close( chandle );
6956 if ( device >= devices_.size() ) {
6957 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6958 error( RtAudioError::WARNING );
6959 return info;
6960 }
6961 return devices_[ device ];
6962 }
6963
6964 int openMode = SND_PCM_ASYNC;
6965 snd_pcm_stream_t stream;
6966 snd_pcm_info_t *pcminfo;
6967 snd_pcm_info_alloca( &pcminfo );
6968 snd_pcm_t *phandle;
6969 snd_pcm_hw_params_t *params;
6970 snd_pcm_hw_params_alloca( ¶ms );
6971
6972 // First try for playback unless default device (which has subdev -1)
6973 stream = SND_PCM_STREAM_PLAYBACK;
6974 snd_pcm_info_set_stream( pcminfo, stream );
6975 if ( subdevice != -1 ) {
6976 snd_pcm_info_set_device( pcminfo, subdevice );
6977 snd_pcm_info_set_subdevice( pcminfo, 0 );
6978
6979 result = snd_ctl_pcm_info( chandle, pcminfo );
6980 if ( result < 0 ) {
6981 // Device probably doesn't support playback.
6982 goto captureProbe;
6983 }
6984 }
6985
6986 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6987 if ( result < 0 ) {
6988 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6989 errorText_ = errorStream_.str();
6990 error( RtAudioError::WARNING );
6991 goto captureProbe;
6992 }
6993
6994 // The device is open ... fill the parameter structure.
6995 result = snd_pcm_hw_params_any( phandle, params );
6996 if ( result < 0 ) {
6997 snd_pcm_close( phandle );
6998 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6999 errorText_ = errorStream_.str();
7000 error( RtAudioError::WARNING );
7001 goto captureProbe;
7002 }
7003
7004 // Get output channel information.
7005 unsigned int value;
7006 result = snd_pcm_hw_params_get_channels_max( params, &value );
7007 if ( result < 0 ) {
7008 snd_pcm_close( phandle );
7009 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7010 errorText_ = errorStream_.str();
7011 error( RtAudioError::WARNING );
7012 goto captureProbe;
7013 }
7014 info.outputChannels = value;
7015 snd_pcm_close( phandle );
7016
7017 captureProbe:
7018 stream = SND_PCM_STREAM_CAPTURE;
7019 snd_pcm_info_set_stream( pcminfo, stream );
7020
7021 // Now try for capture unless default device (with subdev = -1)
7022 if ( subdevice != -1 ) {
7023 result = snd_ctl_pcm_info( chandle, pcminfo );
7024 snd_ctl_close( chandle );
7025 if ( result < 0 ) {
7026 // Device probably doesn't support capture.
7027 if ( info.outputChannels == 0 ) return info;
7028 goto probeParameters;
7029 }
7030 }
7031 else
7032 snd_ctl_close( chandle );
7033
7034 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7035 if ( result < 0 ) {
7036 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7037 errorText_ = errorStream_.str();
7038 error( RtAudioError::WARNING );
7039 if ( info.outputChannels == 0 ) return info;
7040 goto probeParameters;
7041 }
7042
7043 // The device is open ... fill the parameter structure.
7044 result = snd_pcm_hw_params_any( phandle, params );
7045 if ( result < 0 ) {
7046 snd_pcm_close( phandle );
7047 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7048 errorText_ = errorStream_.str();
7049 error( RtAudioError::WARNING );
7050 if ( info.outputChannels == 0 ) return info;
7051 goto probeParameters;
7052 }
7053
7054 result = snd_pcm_hw_params_get_channels_max( params, &value );
7055 if ( result < 0 ) {
7056 snd_pcm_close( phandle );
7057 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7058 errorText_ = errorStream_.str();
7059 error( RtAudioError::WARNING );
7060 if ( info.outputChannels == 0 ) return info;
7061 goto probeParameters;
7062 }
7063 info.inputChannels = value;
7064 snd_pcm_close( phandle );
7065
7066 // If device opens for both playback and capture, we determine the channels.
7067 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7068 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7069
7070 // ALSA doesn't provide default devices so we'll use the first available one.
7071 if ( device == 0 && info.outputChannels > 0 )
7072 info.isDefaultOutput = true;
7073 if ( device == 0 && info.inputChannels > 0 )
7074 info.isDefaultInput = true;
7075
7076 probeParameters:
7077 // At this point, we just need to figure out the supported data
7078 // formats and sample rates. We'll proceed by opening the device in
7079 // the direction with the maximum number of channels, or playback if
7080 // they are equal. This might limit our sample rate options, but so
7081 // be it.
7082
7083 if ( info.outputChannels >= info.inputChannels )
7084 stream = SND_PCM_STREAM_PLAYBACK;
7085 else
7086 stream = SND_PCM_STREAM_CAPTURE;
7087 snd_pcm_info_set_stream( pcminfo, stream );
7088
7089 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7090 if ( result < 0 ) {
7091 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7092 errorText_ = errorStream_.str();
7093 error( RtAudioError::WARNING );
7094 return info;
7095 }
7096
7097 // The device is open ... fill the parameter structure.
7098 result = snd_pcm_hw_params_any( phandle, params );
7099 if ( result < 0 ) {
7100 snd_pcm_close( phandle );
7101 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7102 errorText_ = errorStream_.str();
7103 error( RtAudioError::WARNING );
7104 return info;
7105 }
7106
7107 // Test our discrete set of sample rate values.
7108 info.sampleRates.clear();
7109 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7110 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7111 info.sampleRates.push_back( SAMPLE_RATES[i] );
7112
7113 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7114 info.preferredSampleRate = SAMPLE_RATES[i];
7115 }
7116 }
7117 if ( info.sampleRates.size() == 0 ) {
7118 snd_pcm_close( phandle );
7119 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7120 errorText_ = errorStream_.str();
7121 error( RtAudioError::WARNING );
7122 return info;
7123 }
7124
7125 // Probe the supported data formats ... we don't care about endian-ness just yet
7126 snd_pcm_format_t format;
7127 info.nativeFormats = 0;
7128 format = SND_PCM_FORMAT_S8;
7129 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7130 info.nativeFormats |= RTAUDIO_SINT8;
7131 format = SND_PCM_FORMAT_S16;
7132 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7133 info.nativeFormats |= RTAUDIO_SINT16;
7134 format = SND_PCM_FORMAT_S24;
7135 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7136 info.nativeFormats |= RTAUDIO_SINT24;
7137 format = SND_PCM_FORMAT_S32;
7138 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7139 info.nativeFormats |= RTAUDIO_SINT32;
7140 format = SND_PCM_FORMAT_FLOAT;
7141 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7142 info.nativeFormats |= RTAUDIO_FLOAT32;
7143 format = SND_PCM_FORMAT_FLOAT64;
7144 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7145 info.nativeFormats |= RTAUDIO_FLOAT64;
7146
7147 // Check that we have at least one supported format
7148 if ( info.nativeFormats == 0 ) {
7149 snd_pcm_close( phandle );
7150 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7151 errorText_ = errorStream_.str();
7152 error( RtAudioError::WARNING );
7153 return info;
7154 }
7155
7156 // Get the device name
7157 char *cardname;
7158 result = snd_card_get_name( card, &cardname );
7159 if ( result >= 0 ) {
7160 sprintf( name, "hw:%s,%d", cardname, subdevice );
7161 free( cardname );
7162 }
7163 info.name = name;
7164
7165 // That's all ... close the device and return
7166 snd_pcm_close( phandle );
7167 info.probed = true;
7168 return info;
7169 }
7170
saveDeviceInfo(void)7171 void RtApiAlsa :: saveDeviceInfo( void )
7172 {
7173 devices_.clear();
7174
7175 unsigned int nDevices = getDeviceCount();
7176 devices_.resize( nDevices );
7177 for ( unsigned int i=0; i<nDevices; i++ )
7178 devices_[i] = getDeviceInfo( i );
7179 }
7180
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7181 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7182 unsigned int firstChannel, unsigned int sampleRate,
7183 RtAudioFormat format, unsigned int *bufferSize,
7184 RtAudio::StreamOptions *options )
7185
7186 {
7187 #if defined(__RTAUDIO_DEBUG__)
7188 snd_output_t *out;
7189 snd_output_stdio_attach(&out, stderr, 0);
7190 #endif
7191
7192 // I'm not using the "plug" interface ... too much inconsistent behavior.
7193
7194 unsigned nDevices = 0;
7195 int result, subdevice, card;
7196 char name[64];
7197 snd_ctl_t *chandle;
7198
7199 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7200 snprintf(name, sizeof(name), "%s", "default");
7201 else {
7202 // Count cards and devices
7203 card = -1;
7204 snd_card_next( &card );
7205 while ( card >= 0 ) {
7206 sprintf( name, "hw:%d", card );
7207 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7208 if ( result < 0 ) {
7209 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7210 errorText_ = errorStream_.str();
7211 return FAILURE;
7212 }
7213 subdevice = -1;
7214 while( 1 ) {
7215 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7216 if ( result < 0 ) break;
7217 if ( subdevice < 0 ) break;
7218 if ( nDevices == device ) {
7219 sprintf( name, "hw:%d,%d", card, subdevice );
7220 snd_ctl_close( chandle );
7221 goto foundDevice;
7222 }
7223 nDevices++;
7224 }
7225 snd_ctl_close( chandle );
7226 snd_card_next( &card );
7227 }
7228
7229 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7230 if ( result == 0 ) {
7231 if ( nDevices == device ) {
7232 strcpy( name, "default" );
7233 goto foundDevice;
7234 }
7235 nDevices++;
7236 }
7237
7238 if ( nDevices == 0 ) {
7239 // This should not happen because a check is made before this function is called.
7240 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7241 return FAILURE;
7242 }
7243
7244 if ( device >= nDevices ) {
7245 // This should not happen because a check is made before this function is called.
7246 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7247 return FAILURE;
7248 }
7249 }
7250
7251 foundDevice:
7252
7253 // The getDeviceInfo() function will not work for a device that is
7254 // already open. Thus, we'll probe the system before opening a
7255 // stream and save the results for use by getDeviceInfo().
7256 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7257 this->saveDeviceInfo();
7258
7259 snd_pcm_stream_t stream;
7260 if ( mode == OUTPUT )
7261 stream = SND_PCM_STREAM_PLAYBACK;
7262 else
7263 stream = SND_PCM_STREAM_CAPTURE;
7264
7265 snd_pcm_t *phandle;
7266 int openMode = SND_PCM_ASYNC;
7267 result = snd_pcm_open( &phandle, name, stream, openMode );
7268 if ( result < 0 ) {
7269 if ( mode == OUTPUT )
7270 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7271 else
7272 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7273 errorText_ = errorStream_.str();
7274 return FAILURE;
7275 }
7276
7277 // Fill the parameter structure.
7278 snd_pcm_hw_params_t *hw_params;
7279 snd_pcm_hw_params_alloca( &hw_params );
7280 result = snd_pcm_hw_params_any( phandle, hw_params );
7281 if ( result < 0 ) {
7282 snd_pcm_close( phandle );
7283 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7284 errorText_ = errorStream_.str();
7285 return FAILURE;
7286 }
7287
7288 #if defined(__RTAUDIO_DEBUG__)
7289 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7290 snd_pcm_hw_params_dump( hw_params, out );
7291 #endif
7292
7293 // Set access ... check user preference.
7294 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7295 stream_.userInterleaved = false;
7296 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7297 if ( result < 0 ) {
7298 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7299 stream_.deviceInterleaved[mode] = true;
7300 }
7301 else
7302 stream_.deviceInterleaved[mode] = false;
7303 }
7304 else {
7305 stream_.userInterleaved = true;
7306 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7307 if ( result < 0 ) {
7308 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7309 stream_.deviceInterleaved[mode] = false;
7310 }
7311 else
7312 stream_.deviceInterleaved[mode] = true;
7313 }
7314
7315 if ( result < 0 ) {
7316 snd_pcm_close( phandle );
7317 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7318 errorText_ = errorStream_.str();
7319 return FAILURE;
7320 }
7321
7322 // Determine how to set the device format.
7323 stream_.userFormat = format;
7324 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7325
7326 if ( format == RTAUDIO_SINT8 )
7327 deviceFormat = SND_PCM_FORMAT_S8;
7328 else if ( format == RTAUDIO_SINT16 )
7329 deviceFormat = SND_PCM_FORMAT_S16;
7330 else if ( format == RTAUDIO_SINT24 )
7331 deviceFormat = SND_PCM_FORMAT_S24;
7332 else if ( format == RTAUDIO_SINT32 )
7333 deviceFormat = SND_PCM_FORMAT_S32;
7334 else if ( format == RTAUDIO_FLOAT32 )
7335 deviceFormat = SND_PCM_FORMAT_FLOAT;
7336 else if ( format == RTAUDIO_FLOAT64 )
7337 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7338
7339 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7340 stream_.deviceFormat[mode] = format;
7341 goto setFormat;
7342 }
7343
7344 // The user requested format is not natively supported by the device.
7345 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7346 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7347 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7348 goto setFormat;
7349 }
7350
7351 deviceFormat = SND_PCM_FORMAT_FLOAT;
7352 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7353 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7354 goto setFormat;
7355 }
7356
7357 deviceFormat = SND_PCM_FORMAT_S32;
7358 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7359 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7360 goto setFormat;
7361 }
7362
7363 deviceFormat = SND_PCM_FORMAT_S24;
7364 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7365 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7366 goto setFormat;
7367 }
7368
7369 deviceFormat = SND_PCM_FORMAT_S16;
7370 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7371 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7372 goto setFormat;
7373 }
7374
7375 deviceFormat = SND_PCM_FORMAT_S8;
7376 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7377 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7378 goto setFormat;
7379 }
7380
7381 // If we get here, no supported format was found.
7382 snd_pcm_close( phandle );
7383 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7384 errorText_ = errorStream_.str();
7385 return FAILURE;
7386
7387 setFormat:
7388 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7389 if ( result < 0 ) {
7390 snd_pcm_close( phandle );
7391 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7392 errorText_ = errorStream_.str();
7393 return FAILURE;
7394 }
7395
7396 // Determine whether byte-swaping is necessary.
7397 stream_.doByteSwap[mode] = false;
7398 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7399 result = snd_pcm_format_cpu_endian( deviceFormat );
7400 if ( result == 0 )
7401 stream_.doByteSwap[mode] = true;
7402 else if (result < 0) {
7403 snd_pcm_close( phandle );
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7405 errorText_ = errorStream_.str();
7406 return FAILURE;
7407 }
7408 }
7409
7410 // Set the sample rate.
7411 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7412 if ( result < 0 ) {
7413 snd_pcm_close( phandle );
7414 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7415 errorText_ = errorStream_.str();
7416 return FAILURE;
7417 }
7418
7419 // Determine the number of channels for this device. We support a possible
7420 // minimum device channel number > than the value requested by the user.
7421 stream_.nUserChannels[mode] = channels;
7422 unsigned int value;
7423 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7424 unsigned int deviceChannels = value;
7425 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7426 snd_pcm_close( phandle );
7427 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7428 errorText_ = errorStream_.str();
7429 return FAILURE;
7430 }
7431
7432 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7433 if ( result < 0 ) {
7434 snd_pcm_close( phandle );
7435 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7436 errorText_ = errorStream_.str();
7437 return FAILURE;
7438 }
7439 deviceChannels = value;
7440 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7441 stream_.nDeviceChannels[mode] = deviceChannels;
7442
7443 // Set the device channels.
7444 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7445 if ( result < 0 ) {
7446 snd_pcm_close( phandle );
7447 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7448 errorText_ = errorStream_.str();
7449 return FAILURE;
7450 }
7451
7452 // Set the buffer (or period) size.
7453 int dir = 0;
7454 snd_pcm_uframes_t periodSize = *bufferSize;
7455 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7456 if ( result < 0 ) {
7457 snd_pcm_close( phandle );
7458 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7459 errorText_ = errorStream_.str();
7460 return FAILURE;
7461 }
7462 *bufferSize = periodSize;
7463
7464 // Set the buffer number, which in ALSA is referred to as the "period".
7465 unsigned int periods = 0;
7466 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7467 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7468 if ( periods < 2 ) periods = 4; // a fairly safe default value
7469 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7470 if ( result < 0 ) {
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7473 errorText_ = errorStream_.str();
7474 return FAILURE;
7475 }
7476
7477 // If attempting to setup a duplex stream, the bufferSize parameter
7478 // MUST be the same in both directions!
7479 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7480 snd_pcm_close( phandle );
7481 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7482 errorText_ = errorStream_.str();
7483 return FAILURE;
7484 }
7485
7486 stream_.bufferSize = *bufferSize;
7487
7488 // Install the hardware configuration
7489 result = snd_pcm_hw_params( phandle, hw_params );
7490 if ( result < 0 ) {
7491 snd_pcm_close( phandle );
7492 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7493 errorText_ = errorStream_.str();
7494 return FAILURE;
7495 }
7496
7497 #if defined(__RTAUDIO_DEBUG__)
7498 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7499 snd_pcm_hw_params_dump( hw_params, out );
7500 #endif
7501
7502 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7503 snd_pcm_sw_params_t *sw_params = NULL;
7504 snd_pcm_sw_params_alloca( &sw_params );
7505 snd_pcm_sw_params_current( phandle, sw_params );
7506 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7507 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7508 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7509
7510 // The following two settings were suggested by Theo Veenker
7511 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7512 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7513
7514 // here are two options for a fix
7515 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7516 snd_pcm_uframes_t val;
7517 snd_pcm_sw_params_get_boundary( sw_params, &val );
7518 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7519
7520 result = snd_pcm_sw_params( phandle, sw_params );
7521 if ( result < 0 ) {
7522 snd_pcm_close( phandle );
7523 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7524 errorText_ = errorStream_.str();
7525 return FAILURE;
7526 }
7527
7528 #if defined(__RTAUDIO_DEBUG__)
7529 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7530 snd_pcm_sw_params_dump( sw_params, out );
7531 #endif
7532
7533 // Set flags for buffer conversion
7534 stream_.doConvertBuffer[mode] = false;
7535 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7536 stream_.doConvertBuffer[mode] = true;
7537 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7538 stream_.doConvertBuffer[mode] = true;
7539 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7540 stream_.nUserChannels[mode] > 1 )
7541 stream_.doConvertBuffer[mode] = true;
7542
7543 // Allocate the ApiHandle if necessary and then save.
7544 AlsaHandle *apiInfo = 0;
7545 if ( stream_.apiHandle == 0 ) {
7546 try {
7547 apiInfo = (AlsaHandle *) new AlsaHandle;
7548 }
7549 catch ( std::bad_alloc& ) {
7550 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7551 goto error;
7552 }
7553
7554 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7555 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7556 goto error;
7557 }
7558
7559 stream_.apiHandle = (void *) apiInfo;
7560 apiInfo->handles[0] = 0;
7561 apiInfo->handles[1] = 0;
7562 }
7563 else {
7564 apiInfo = (AlsaHandle *) stream_.apiHandle;
7565 }
7566 apiInfo->handles[mode] = phandle;
7567 phandle = 0;
7568
7569 // Allocate necessary internal buffers.
7570 unsigned long bufferBytes;
7571 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7572 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7573 if ( stream_.userBuffer[mode] == NULL ) {
7574 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7575 goto error;
7576 }
7577
7578 if ( stream_.doConvertBuffer[mode] ) {
7579
7580 bool makeBuffer = true;
7581 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7582 if ( mode == INPUT ) {
7583 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7584 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7585 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7586 }
7587 }
7588
7589 if ( makeBuffer ) {
7590 bufferBytes *= *bufferSize;
7591 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7592 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7593 if ( stream_.deviceBuffer == NULL ) {
7594 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7595 goto error;
7596 }
7597 }
7598 }
7599
7600 stream_.sampleRate = sampleRate;
7601 stream_.nBuffers = periods;
7602 stream_.device[mode] = device;
7603 stream_.state = STREAM_STOPPED;
7604
7605 // Setup the buffer conversion information structure.
7606 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7607
7608 // Setup thread if necessary.
7609 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7610 // We had already set up an output stream.
7611 stream_.mode = DUPLEX;
7612 // Link the streams if possible.
7613 apiInfo->synchronized = false;
7614 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7615 apiInfo->synchronized = true;
7616 else {
7617 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7618 error( RtAudioError::WARNING );
7619 }
7620 }
7621 else {
7622 stream_.mode = mode;
7623
7624 // Setup callback thread.
7625 stream_.callbackInfo.object = (void *) this;
7626
7627 // Set the thread attributes for joinable and realtime scheduling
7628 // priority (optional). The higher priority will only take affect
7629 // if the program is run as root or suid. Note, under Linux
7630 // processes with CAP_SYS_NICE privilege, a user can change
7631 // scheduling policy and priority (thus need not be root). See
7632 // POSIX "capabilities".
7633 pthread_attr_t attr;
7634 pthread_attr_init( &attr );
7635 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7636
7637 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7638 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7639 // We previously attempted to increase the audio callback priority
7640 // to SCHED_RR here via the attributes. However, while no errors
7641 // were reported in doing so, it did not work. So, now this is
7642 // done in the alsaCallbackHandler function.
7643 stream_.callbackInfo.doRealtime = true;
7644 int priority = options->priority;
7645 int min = sched_get_priority_min( SCHED_RR );
7646 int max = sched_get_priority_max( SCHED_RR );
7647 if ( priority < min ) priority = min;
7648 else if ( priority > max ) priority = max;
7649 stream_.callbackInfo.priority = priority;
7650 }
7651 #endif
7652
7653 stream_.callbackInfo.isRunning = true;
7654 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7655 pthread_attr_destroy( &attr );
7656 if ( result ) {
7657 stream_.callbackInfo.isRunning = false;
7658 errorText_ = "RtApiAlsa::error creating callback thread!";
7659 goto error;
7660 }
7661 }
7662
7663 return SUCCESS;
7664
7665 error:
7666 if ( apiInfo ) {
7667 pthread_cond_destroy( &apiInfo->runnable_cv );
7668 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7669 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7670 delete apiInfo;
7671 stream_.apiHandle = 0;
7672 }
7673
7674 if ( phandle) snd_pcm_close( phandle );
7675
7676 for ( int i=0; i<2; i++ ) {
7677 if ( stream_.userBuffer[i] ) {
7678 free( stream_.userBuffer[i] );
7679 stream_.userBuffer[i] = 0;
7680 }
7681 }
7682
7683 if ( stream_.deviceBuffer ) {
7684 free( stream_.deviceBuffer );
7685 stream_.deviceBuffer = 0;
7686 }
7687
7688 stream_.state = STREAM_CLOSED;
7689 return FAILURE;
7690 }
7691
closeStream()7692 void RtApiAlsa :: closeStream()
7693 {
7694 if ( stream_.state == STREAM_CLOSED ) {
7695 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7696 error( RtAudioError::WARNING );
7697 return;
7698 }
7699
7700 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7701 stream_.callbackInfo.isRunning = false;
7702 MUTEX_LOCK( &stream_.mutex );
7703 if ( stream_.state == STREAM_STOPPED ) {
7704 apiInfo->runnable = true;
7705 pthread_cond_signal( &apiInfo->runnable_cv );
7706 }
7707 MUTEX_UNLOCK( &stream_.mutex );
7708 pthread_join( stream_.callbackInfo.thread, NULL );
7709
7710 if ( stream_.state == STREAM_RUNNING ) {
7711 stream_.state = STREAM_STOPPED;
7712 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7713 snd_pcm_drop( apiInfo->handles[0] );
7714 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7715 snd_pcm_drop( apiInfo->handles[1] );
7716 }
7717
7718 if ( apiInfo ) {
7719 pthread_cond_destroy( &apiInfo->runnable_cv );
7720 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7721 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7722 delete apiInfo;
7723 stream_.apiHandle = 0;
7724 }
7725
7726 for ( int i=0; i<2; i++ ) {
7727 if ( stream_.userBuffer[i] ) {
7728 free( stream_.userBuffer[i] );
7729 stream_.userBuffer[i] = 0;
7730 }
7731 }
7732
7733 if ( stream_.deviceBuffer ) {
7734 free( stream_.deviceBuffer );
7735 stream_.deviceBuffer = 0;
7736 }
7737
7738 stream_.mode = UNINITIALIZED;
7739 stream_.state = STREAM_CLOSED;
7740 }
7741
startStream()7742 void RtApiAlsa :: startStream()
7743 {
7744 // This method calls snd_pcm_prepare if the device isn't already in that state.
7745
7746 verifyStream();
7747 if ( stream_.state == STREAM_RUNNING ) {
7748 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7749 error( RtAudioError::WARNING );
7750 return;
7751 }
7752
7753 MUTEX_LOCK( &stream_.mutex );
7754
7755 int result = 0;
7756 snd_pcm_state_t state;
7757 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7758 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7759 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7760 state = snd_pcm_state( handle[0] );
7761 if ( state != SND_PCM_STATE_PREPARED ) {
7762 result = snd_pcm_prepare( handle[0] );
7763 if ( result < 0 ) {
7764 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7765 errorText_ = errorStream_.str();
7766 goto unlock;
7767 }
7768 }
7769 }
7770
7771 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7772 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7773 state = snd_pcm_state( handle[1] );
7774 if ( state != SND_PCM_STATE_PREPARED ) {
7775 result = snd_pcm_prepare( handle[1] );
7776 if ( result < 0 ) {
7777 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7778 errorText_ = errorStream_.str();
7779 goto unlock;
7780 }
7781 }
7782 }
7783
7784 stream_.state = STREAM_RUNNING;
7785
7786 unlock:
7787 apiInfo->runnable = true;
7788 pthread_cond_signal( &apiInfo->runnable_cv );
7789 MUTEX_UNLOCK( &stream_.mutex );
7790
7791 if ( result >= 0 ) return;
7792 error( RtAudioError::SYSTEM_ERROR );
7793 }
7794
stopStream()7795 void RtApiAlsa :: stopStream()
7796 {
7797 verifyStream();
7798 if ( stream_.state == STREAM_STOPPED ) {
7799 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7800 error( RtAudioError::WARNING );
7801 return;
7802 }
7803
7804 stream_.state = STREAM_STOPPED;
7805 MUTEX_LOCK( &stream_.mutex );
7806
7807 int result = 0;
7808 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7809 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7810 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7811 if ( apiInfo->synchronized )
7812 result = snd_pcm_drop( handle[0] );
7813 else
7814 result = snd_pcm_drain( handle[0] );
7815 if ( result < 0 ) {
7816 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7817 errorText_ = errorStream_.str();
7818 goto unlock;
7819 }
7820 }
7821
7822 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7823 result = snd_pcm_drop( handle[1] );
7824 if ( result < 0 ) {
7825 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7826 errorText_ = errorStream_.str();
7827 goto unlock;
7828 }
7829 }
7830
7831 unlock:
7832 apiInfo->runnable = false; // fixes high CPU usage when stopped
7833 MUTEX_UNLOCK( &stream_.mutex );
7834
7835 if ( result >= 0 ) return;
7836 error( RtAudioError::SYSTEM_ERROR );
7837 }
7838
abortStream()7839 void RtApiAlsa :: abortStream()
7840 {
7841 verifyStream();
7842 if ( stream_.state == STREAM_STOPPED ) {
7843 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7844 error( RtAudioError::WARNING );
7845 return;
7846 }
7847
7848 stream_.state = STREAM_STOPPED;
7849 MUTEX_LOCK( &stream_.mutex );
7850
7851 int result = 0;
7852 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7853 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7854 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7855 result = snd_pcm_drop( handle[0] );
7856 if ( result < 0 ) {
7857 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7858 errorText_ = errorStream_.str();
7859 goto unlock;
7860 }
7861 }
7862
7863 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7864 result = snd_pcm_drop( handle[1] );
7865 if ( result < 0 ) {
7866 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7867 errorText_ = errorStream_.str();
7868 goto unlock;
7869 }
7870 }
7871
7872 unlock:
7873 apiInfo->runnable = false; // fixes high CPU usage when stopped
7874 MUTEX_UNLOCK( &stream_.mutex );
7875
7876 if ( result >= 0 ) return;
7877 error( RtAudioError::SYSTEM_ERROR );
7878 }
7879
callbackEvent()7880 void RtApiAlsa :: callbackEvent()
7881 {
7882 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7883 if ( stream_.state == STREAM_STOPPED ) {
7884 MUTEX_LOCK( &stream_.mutex );
7885 while ( !apiInfo->runnable )
7886 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7887
7888 if ( stream_.state != STREAM_RUNNING ) {
7889 MUTEX_UNLOCK( &stream_.mutex );
7890 return;
7891 }
7892 MUTEX_UNLOCK( &stream_.mutex );
7893 }
7894
7895 if ( stream_.state == STREAM_CLOSED ) {
7896 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7897 error( RtAudioError::WARNING );
7898 return;
7899 }
7900
7901 int doStopStream = 0;
7902 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7903 double streamTime = getStreamTime();
7904 RtAudioStreamStatus status = 0;
7905 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7906 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7907 apiInfo->xrun[0] = false;
7908 }
7909 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7910 status |= RTAUDIO_INPUT_OVERFLOW;
7911 apiInfo->xrun[1] = false;
7912 }
7913 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7914 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7915
7916 if ( doStopStream == 2 ) {
7917 abortStream();
7918 return;
7919 }
7920
7921 MUTEX_LOCK( &stream_.mutex );
7922
7923 // The state might change while waiting on a mutex.
7924 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7925
7926 int result;
7927 char *buffer;
7928 int channels;
7929 snd_pcm_t **handle;
7930 snd_pcm_sframes_t frames;
7931 RtAudioFormat format;
7932 handle = (snd_pcm_t **) apiInfo->handles;
7933
7934 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7935
7936 // Setup parameters.
7937 if ( stream_.doConvertBuffer[1] ) {
7938 buffer = stream_.deviceBuffer;
7939 channels = stream_.nDeviceChannels[1];
7940 format = stream_.deviceFormat[1];
7941 }
7942 else {
7943 buffer = stream_.userBuffer[1];
7944 channels = stream_.nUserChannels[1];
7945 format = stream_.userFormat;
7946 }
7947
7948 // Read samples from device in interleaved/non-interleaved format.
7949 if ( stream_.deviceInterleaved[1] )
7950 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
7951 else {
7952 void *bufs[channels];
7953 size_t offset = stream_.bufferSize * formatBytes( format );
7954 for ( int i=0; i<channels; i++ )
7955 bufs[i] = (void *) (buffer + (i * offset));
7956 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7957 }
7958
7959 if ( result < (int) stream_.bufferSize ) {
7960 // Either an error or overrun occurred.
7961 if ( result == -EPIPE ) {
7962 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7963 if ( state == SND_PCM_STATE_XRUN ) {
7964 apiInfo->xrun[1] = true;
7965 result = snd_pcm_prepare( handle[1] );
7966 if ( result < 0 ) {
7967 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7968 errorText_ = errorStream_.str();
7969 }
7970 }
7971 else {
7972 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7973 errorText_ = errorStream_.str();
7974 }
7975 }
7976 else {
7977 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7978 errorText_ = errorStream_.str();
7979 }
7980 error( RtAudioError::WARNING );
7981 goto tryOutput;
7982 }
7983
7984 // Do byte swapping if necessary.
7985 if ( stream_.doByteSwap[1] )
7986 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7987
7988 // Do buffer conversion if necessary.
7989 if ( stream_.doConvertBuffer[1] )
7990 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7991
7992 // Check stream latency
7993 result = snd_pcm_delay( handle[1], &frames );
7994 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
7995 }
7996
7997 tryOutput:
7998
7999 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8000
8001 // Setup parameters and do buffer conversion if necessary.
8002 if ( stream_.doConvertBuffer[0] ) {
8003 buffer = stream_.deviceBuffer;
8004 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8005 channels = stream_.nDeviceChannels[0];
8006 format = stream_.deviceFormat[0];
8007 }
8008 else {
8009 buffer = stream_.userBuffer[0];
8010 channels = stream_.nUserChannels[0];
8011 format = stream_.userFormat;
8012 }
8013
8014 // Do byte swapping if necessary.
8015 if ( stream_.doByteSwap[0] )
8016 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8017
8018 // Write samples to device in interleaved/non-interleaved format.
8019 if ( stream_.deviceInterleaved[0] )
8020 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8021 else {
8022 void *bufs[channels];
8023 size_t offset = stream_.bufferSize * formatBytes( format );
8024 for ( int i=0; i<channels; i++ )
8025 bufs[i] = (void *) (buffer + (i * offset));
8026 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8027 }
8028
8029 if ( result < (int) stream_.bufferSize ) {
8030 // Either an error or underrun occurred.
8031 if ( result == -EPIPE ) {
8032 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8033 if ( state == SND_PCM_STATE_XRUN ) {
8034 apiInfo->xrun[0] = true;
8035 result = snd_pcm_prepare( handle[0] );
8036 if ( result < 0 ) {
8037 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8038 errorText_ = errorStream_.str();
8039 }
8040 else
8041 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8042 }
8043 else {
8044 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8045 errorText_ = errorStream_.str();
8046 }
8047 }
8048 else {
8049 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8050 errorText_ = errorStream_.str();
8051 }
8052 error( RtAudioError::WARNING );
8053 goto unlock;
8054 }
8055
8056 // Check stream latency
8057 result = snd_pcm_delay( handle[0], &frames );
8058 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8059 }
8060
8061 unlock:
8062 MUTEX_UNLOCK( &stream_.mutex );
8063
8064 RtApi::tickStreamTime();
8065 if ( doStopStream == 1 ) this->stopStream();
8066 }
8067
alsaCallbackHandler(void * ptr)8068 static void *alsaCallbackHandler( void *ptr )
8069 {
8070 CallbackInfo *info = (CallbackInfo *) ptr;
8071 RtApiAlsa *object = (RtApiAlsa *) info->object;
8072 bool *isRunning = &info->isRunning;
8073
8074 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8075 if ( info->doRealtime ) {
8076 pthread_t tID = pthread_self(); // ID of this thread
8077 sched_param prio = { info->priority }; // scheduling priority of thread
8078 pthread_setschedparam( tID, SCHED_RR, &prio );
8079 }
8080 #endif
8081
8082 while ( *isRunning == true ) {
8083 pthread_testcancel();
8084 object->callbackEvent();
8085 }
8086
8087 pthread_exit( NULL );
8088 }
8089
8090 //******************** End of __LINUX_ALSA__ *********************//
8091 #endif
8092
8093 #if defined(__LINUX_PULSE__)
8094
8095 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8096 // and Tristan Matthews.
8097
8098 #include <pulse/error.h>
8099 #include <pulse/simple.h>
8100 #include <cstdio>
8101
// Sample rates offered by the PulseAudio backend (zero terminates the list).
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Maps an RtAudio sample format to its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats handled natively (without conversion); the {0, PA_SAMPLE_INVALID}
// entry terminates the table.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8115
// Per-stream state for the PulseAudio backend.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (0 until output is opened)
  pa_simple *s_rec;           // record connection (0 until input is opened)
  pthread_t thread;           // callback thread (valid once pthread_create succeeds)
  pthread_cond_t runnable_cv; // signalled to wake the callback thread
  bool runnable;              // predicate guarded by the stream mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8124
~RtApiPulse()8125 RtApiPulse::~RtApiPulse()
8126 {
8127 if ( stream_.state != STREAM_CLOSED )
8128 closeStream();
8129 }
8130
getDeviceCount(void)8131 unsigned int RtApiPulse::getDeviceCount( void )
8132 {
8133 return 1;
8134 }
8135
getDeviceInfo(unsigned int)8136 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8137 {
8138 RtAudio::DeviceInfo info;
8139 info.probed = true;
8140 info.name = "PulseAudio";
8141 info.outputChannels = 2;
8142 info.inputChannels = 2;
8143 info.duplexChannels = 2;
8144 info.isDefaultOutput = true;
8145 info.isDefaultInput = true;
8146
8147 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8148 info.sampleRates.push_back( *sr );
8149
8150 info.preferredSampleRate = 48000;
8151 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8152
8153 return info;
8154 }
8155
pulseaudio_callback(void * user)8156 static void *pulseaudio_callback( void * user )
8157 {
8158 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8159 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8160 volatile bool *isRunning = &cbi->isRunning;
8161
8162 while ( *isRunning ) {
8163 pthread_testcancel();
8164 context->callbackEvent();
8165 }
8166
8167 pthread_exit( NULL );
8168 }
8169
closeStream(void)8170 void RtApiPulse::closeStream( void )
8171 {
8172 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8173
8174 stream_.callbackInfo.isRunning = false;
8175 if ( pah ) {
8176 MUTEX_LOCK( &stream_.mutex );
8177 if ( stream_.state == STREAM_STOPPED ) {
8178 pah->runnable = true;
8179 pthread_cond_signal( &pah->runnable_cv );
8180 }
8181 MUTEX_UNLOCK( &stream_.mutex );
8182
8183 pthread_join( pah->thread, 0 );
8184 if ( pah->s_play ) {
8185 pa_simple_flush( pah->s_play, NULL );
8186 pa_simple_free( pah->s_play );
8187 }
8188 if ( pah->s_rec )
8189 pa_simple_free( pah->s_rec );
8190
8191 pthread_cond_destroy( &pah->runnable_cv );
8192 delete pah;
8193 stream_.apiHandle = 0;
8194 }
8195
8196 if ( stream_.userBuffer[0] ) {
8197 free( stream_.userBuffer[0] );
8198 stream_.userBuffer[0] = 0;
8199 }
8200 if ( stream_.userBuffer[1] ) {
8201 free( stream_.userBuffer[1] );
8202 stream_.userBuffer[1] = 0;
8203 }
8204
8205 stream_.state = STREAM_CLOSED;
8206 stream_.mode = UNINITIALIZED;
8207 }
8208
callbackEvent(void)8209 void RtApiPulse::callbackEvent( void )
8210 {
8211 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8212
8213 if ( stream_.state == STREAM_STOPPED ) {
8214 MUTEX_LOCK( &stream_.mutex );
8215 while ( !pah->runnable )
8216 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8217
8218 if ( stream_.state != STREAM_RUNNING ) {
8219 MUTEX_UNLOCK( &stream_.mutex );
8220 return;
8221 }
8222 MUTEX_UNLOCK( &stream_.mutex );
8223 }
8224
8225 if ( stream_.state == STREAM_CLOSED ) {
8226 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8227 "this shouldn't happen!";
8228 error( RtAudioError::WARNING );
8229 return;
8230 }
8231
8232 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8233 double streamTime = getStreamTime();
8234 RtAudioStreamStatus status = 0;
8235 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8236 stream_.bufferSize, streamTime, status,
8237 stream_.callbackInfo.userData );
8238
8239 if ( doStopStream == 2 ) {
8240 abortStream();
8241 return;
8242 }
8243
8244 MUTEX_LOCK( &stream_.mutex );
8245 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8246 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8247
8248 if ( stream_.state != STREAM_RUNNING )
8249 goto unlock;
8250
8251 int pa_error;
8252 size_t bytes;
8253 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8254 if ( stream_.doConvertBuffer[OUTPUT] ) {
8255 convertBuffer( stream_.deviceBuffer,
8256 stream_.userBuffer[OUTPUT],
8257 stream_.convertInfo[OUTPUT] );
8258 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8259 formatBytes( stream_.deviceFormat[OUTPUT] );
8260 } else
8261 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8262 formatBytes( stream_.userFormat );
8263
8264 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8265 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8266 pa_strerror( pa_error ) << ".";
8267 errorText_ = errorStream_.str();
8268 error( RtAudioError::WARNING );
8269 }
8270 }
8271
8272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8273 if ( stream_.doConvertBuffer[INPUT] )
8274 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8275 formatBytes( stream_.deviceFormat[INPUT] );
8276 else
8277 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8278 formatBytes( stream_.userFormat );
8279
8280 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8281 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8282 pa_strerror( pa_error ) << ".";
8283 errorText_ = errorStream_.str();
8284 error( RtAudioError::WARNING );
8285 }
8286 if ( stream_.doConvertBuffer[INPUT] ) {
8287 convertBuffer( stream_.userBuffer[INPUT],
8288 stream_.deviceBuffer,
8289 stream_.convertInfo[INPUT] );
8290 }
8291 }
8292
8293 unlock:
8294 MUTEX_UNLOCK( &stream_.mutex );
8295 RtApi::tickStreamTime();
8296
8297 if ( doStopStream == 1 )
8298 stopStream();
8299 }
8300
startStream(void)8301 void RtApiPulse::startStream( void )
8302 {
8303 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8304
8305 if ( stream_.state == STREAM_CLOSED ) {
8306 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8307 error( RtAudioError::INVALID_USE );
8308 return;
8309 }
8310 if ( stream_.state == STREAM_RUNNING ) {
8311 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8312 error( RtAudioError::WARNING );
8313 return;
8314 }
8315
8316 MUTEX_LOCK( &stream_.mutex );
8317
8318 stream_.state = STREAM_RUNNING;
8319
8320 pah->runnable = true;
8321 pthread_cond_signal( &pah->runnable_cv );
8322 MUTEX_UNLOCK( &stream_.mutex );
8323 }
8324
stopStream(void)8325 void RtApiPulse::stopStream( void )
8326 {
8327 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8328
8329 if ( stream_.state == STREAM_CLOSED ) {
8330 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8331 error( RtAudioError::INVALID_USE );
8332 return;
8333 }
8334 if ( stream_.state == STREAM_STOPPED ) {
8335 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8336 error( RtAudioError::WARNING );
8337 return;
8338 }
8339
8340 stream_.state = STREAM_STOPPED;
8341 MUTEX_LOCK( &stream_.mutex );
8342
8343 if ( pah && pah->s_play ) {
8344 int pa_error;
8345 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8346 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8347 pa_strerror( pa_error ) << ".";
8348 errorText_ = errorStream_.str();
8349 MUTEX_UNLOCK( &stream_.mutex );
8350 error( RtAudioError::SYSTEM_ERROR );
8351 return;
8352 }
8353 }
8354
8355 stream_.state = STREAM_STOPPED;
8356 MUTEX_UNLOCK( &stream_.mutex );
8357 }
8358
abortStream(void)8359 void RtApiPulse::abortStream( void )
8360 {
8361 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8362
8363 if ( stream_.state == STREAM_CLOSED ) {
8364 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8365 error( RtAudioError::INVALID_USE );
8366 return;
8367 }
8368 if ( stream_.state == STREAM_STOPPED ) {
8369 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8370 error( RtAudioError::WARNING );
8371 return;
8372 }
8373
8374 stream_.state = STREAM_STOPPED;
8375 MUTEX_LOCK( &stream_.mutex );
8376
8377 if ( pah && pah->s_play ) {
8378 int pa_error;
8379 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8380 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8381 pa_strerror( pa_error ) << ".";
8382 errorText_ = errorStream_.str();
8383 MUTEX_UNLOCK( &stream_.mutex );
8384 error( RtAudioError::SYSTEM_ERROR );
8385 return;
8386 }
8387 }
8388
8389 stream_.state = STREAM_STOPPED;
8390 MUTEX_UNLOCK( &stream_.mutex );
8391 }
8392
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8393 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8394 unsigned int channels, unsigned int firstChannel,
8395 unsigned int sampleRate, RtAudioFormat format,
8396 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8397 {
8398 PulseAudioHandle *pah = 0;
8399 unsigned long bufferBytes = 0;
8400 pa_sample_spec ss;
8401
8402 if ( device != 0 ) return false;
8403 if ( mode != INPUT && mode != OUTPUT ) return false;
8404 if ( channels != 1 && channels != 2 ) {
8405 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8406 return false;
8407 }
8408 ss.channels = channels;
8409
8410 if ( firstChannel != 0 ) return false;
8411
8412 bool sr_found = false;
8413 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8414 if ( sampleRate == *sr ) {
8415 sr_found = true;
8416 stream_.sampleRate = sampleRate;
8417 ss.rate = sampleRate;
8418 break;
8419 }
8420 }
8421 if ( !sr_found ) {
8422 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8423 return false;
8424 }
8425
8426 bool sf_found = 0;
8427 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8428 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8429 if ( format == sf->rtaudio_format ) {
8430 sf_found = true;
8431 stream_.userFormat = sf->rtaudio_format;
8432 stream_.deviceFormat[mode] = stream_.userFormat;
8433 ss.format = sf->pa_format;
8434 break;
8435 }
8436 }
8437 if ( !sf_found ) { // Use internal data format conversion.
8438 stream_.userFormat = format;
8439 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8440 ss.format = PA_SAMPLE_FLOAT32LE;
8441 }
8442
8443 // Set other stream parameters.
8444 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8445 else stream_.userInterleaved = true;
8446 stream_.deviceInterleaved[mode] = true;
8447 stream_.nBuffers = 1;
8448 stream_.doByteSwap[mode] = false;
8449 stream_.nUserChannels[mode] = channels;
8450 stream_.nDeviceChannels[mode] = channels + firstChannel;
8451 stream_.channelOffset[mode] = 0;
8452 std::string streamName = "RtAudio";
8453
8454 // Set flags for buffer conversion.
8455 stream_.doConvertBuffer[mode] = false;
8456 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8457 stream_.doConvertBuffer[mode] = true;
8458 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8459 stream_.doConvertBuffer[mode] = true;
8460
8461 // Allocate necessary internal buffers.
8462 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8463 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8464 if ( stream_.userBuffer[mode] == NULL ) {
8465 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8466 goto error;
8467 }
8468 stream_.bufferSize = *bufferSize;
8469
8470 if ( stream_.doConvertBuffer[mode] ) {
8471
8472 bool makeBuffer = true;
8473 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8474 if ( mode == INPUT ) {
8475 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8476 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8477 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8478 }
8479 }
8480
8481 if ( makeBuffer ) {
8482 bufferBytes *= *bufferSize;
8483 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8484 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8485 if ( stream_.deviceBuffer == NULL ) {
8486 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8487 goto error;
8488 }
8489 }
8490 }
8491
8492 stream_.device[mode] = device;
8493
8494 // Setup the buffer conversion information structure.
8495 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8496
8497 if ( !stream_.apiHandle ) {
8498 PulseAudioHandle *pah = new PulseAudioHandle;
8499 if ( !pah ) {
8500 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8501 goto error;
8502 }
8503
8504 stream_.apiHandle = pah;
8505 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8506 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8507 goto error;
8508 }
8509 }
8510 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8511
8512 int error;
8513 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8514 switch ( mode ) {
8515 case INPUT:
8516 pa_buffer_attr buffer_attr;
8517 buffer_attr.fragsize = bufferBytes;
8518 buffer_attr.maxlength = -1;
8519
8520 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8521 if ( !pah->s_rec ) {
8522 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8523 goto error;
8524 }
8525 break;
8526 case OUTPUT:
8527 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8528 if ( !pah->s_play ) {
8529 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8530 goto error;
8531 }
8532 break;
8533 default:
8534 goto error;
8535 }
8536
8537 if ( stream_.mode == UNINITIALIZED )
8538 stream_.mode = mode;
8539 else if ( stream_.mode == mode )
8540 goto error;
8541 else
8542 stream_.mode = DUPLEX;
8543
8544 if ( !stream_.callbackInfo.isRunning ) {
8545 stream_.callbackInfo.object = this;
8546 stream_.callbackInfo.isRunning = true;
8547 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8548 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8549 goto error;
8550 }
8551 }
8552
8553 stream_.state = STREAM_STOPPED;
8554 return true;
8555
8556 error:
8557 if ( pah && stream_.callbackInfo.isRunning ) {
8558 pthread_cond_destroy( &pah->runnable_cv );
8559 delete pah;
8560 stream_.apiHandle = 0;
8561 }
8562
8563 for ( int i=0; i<2; i++ ) {
8564 if ( stream_.userBuffer[i] ) {
8565 free( stream_.userBuffer[i] );
8566 stream_.userBuffer[i] = 0;
8567 }
8568 }
8569
8570 if ( stream_.deviceBuffer ) {
8571 free( stream_.deviceBuffer );
8572 stream_.deviceBuffer = 0;
8573 }
8574
8575 return FAILURE;
8576 }
8577
8578 //******************** End of __LINUX_PULSE__ *********************//
8579 #endif
8580
8581 #if defined(__LINUX_OSS__)
8582
8583 #include <unistd.h>
8584 #include <sys/ioctl.h>
8585 #include <unistd.h>
8586 #include <fcntl.h>
8587 #include <sys/soundcard.h>
8588 #include <errno.h>
8589 #include <math.h>
8590
8591 static void *ossCallbackHandler(void * ptr);
8592
8593 // A structure to hold various information related to the OSS API
8594 // implementation.
// Per-stream bookkeeping for the OSS backend.
struct OssHandle {
  int id[2];               // device file descriptors: [0] playback, [1] capture
  bool xrun[2];            // underflow/overflow flags per direction
  bool triggered;          // duplex trigger state (see SNDCTL_DSP_SETTRIGGER use)
  pthread_cond_t runnable; // signaled to wake the callback thread when starting

  OssHandle()
    : triggered( false )
  {
    for ( int i = 0; i < 2; i++ ) {
      id[i] = 0;
      xrun[i] = false;
    }
  }
};
8604
RtApiOss :: RtApiOss()
{
  // No OSS-specific initialization required; all stream state is set up
  // lazily in probeDeviceOpen().
}
8609
RtApiOss :: ~RtApiOss()
{
  // Ensure any open stream is shut down and its resources released.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
8614
getDeviceCount(void)8615 unsigned int RtApiOss :: getDeviceCount( void )
8616 {
8617 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8618 if ( mixerfd == -1 ) {
8619 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8620 error( RtAudioError::WARNING );
8621 return 0;
8622 }
8623
8624 oss_sysinfo sysinfo;
8625 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8626 close( mixerfd );
8627 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8628 error( RtAudioError::WARNING );
8629 return 0;
8630 }
8631
8632 close( mixerfd );
8633 return sysinfo.numaudios;
8634 }
8635
getDeviceInfo(unsigned int device)8636 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8637 {
8638 RtAudio::DeviceInfo info;
8639 info.probed = false;
8640
8641 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8642 if ( mixerfd == -1 ) {
8643 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8644 error( RtAudioError::WARNING );
8645 return info;
8646 }
8647
8648 oss_sysinfo sysinfo;
8649 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8650 if ( result == -1 ) {
8651 close( mixerfd );
8652 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8653 error( RtAudioError::WARNING );
8654 return info;
8655 }
8656
8657 unsigned nDevices = sysinfo.numaudios;
8658 if ( nDevices == 0 ) {
8659 close( mixerfd );
8660 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8661 error( RtAudioError::INVALID_USE );
8662 return info;
8663 }
8664
8665 if ( device >= nDevices ) {
8666 close( mixerfd );
8667 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8668 error( RtAudioError::INVALID_USE );
8669 return info;
8670 }
8671
8672 oss_audioinfo ainfo;
8673 ainfo.dev = device;
8674 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8675 close( mixerfd );
8676 if ( result == -1 ) {
8677 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8678 errorText_ = errorStream_.str();
8679 error( RtAudioError::WARNING );
8680 return info;
8681 }
8682
8683 // Probe channels
8684 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8685 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8686 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8687 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8688 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8689 }
8690
8691 // Probe data formats ... do for input
8692 unsigned long mask = ainfo.iformats;
8693 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8694 info.nativeFormats |= RTAUDIO_SINT16;
8695 if ( mask & AFMT_S8 )
8696 info.nativeFormats |= RTAUDIO_SINT8;
8697 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8698 info.nativeFormats |= RTAUDIO_SINT32;
8699 #ifdef AFMT_FLOAT
8700 if ( mask & AFMT_FLOAT )
8701 info.nativeFormats |= RTAUDIO_FLOAT32;
8702 #endif
8703 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8704 info.nativeFormats |= RTAUDIO_SINT24;
8705
8706 // Check that we have at least one supported format
8707 if ( info.nativeFormats == 0 ) {
8708 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8709 errorText_ = errorStream_.str();
8710 error( RtAudioError::WARNING );
8711 return info;
8712 }
8713
8714 // Probe the supported sample rates.
8715 info.sampleRates.clear();
8716 if ( ainfo.nrates ) {
8717 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8718 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8719 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8720 info.sampleRates.push_back( SAMPLE_RATES[k] );
8721
8722 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8723 info.preferredSampleRate = SAMPLE_RATES[k];
8724
8725 break;
8726 }
8727 }
8728 }
8729 }
8730 else {
8731 // Check min and max rate values;
8732 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8733 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8734 info.sampleRates.push_back( SAMPLE_RATES[k] );
8735
8736 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8737 info.preferredSampleRate = SAMPLE_RATES[k];
8738 }
8739 }
8740 }
8741
8742 if ( info.sampleRates.size() == 0 ) {
8743 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8744 errorText_ = errorStream_.str();
8745 error( RtAudioError::WARNING );
8746 }
8747 else {
8748 info.probed = true;
8749 info.name = ainfo.name;
8750 }
8751
8752 return info;
8753 }
8754
8755
// Open and configure an OSS device for one stream direction.
//
// @param device       zero-based OSS device index (validated against sysinfo)
// @param mode         OUTPUT or INPUT; called once per direction for duplex
// @param channels     number of channels requested by the user
// @param firstChannel channel offset within the device
// @param sampleRate   requested rate; must be matched within 100 Hz
// @param format       requested user sample format
// @param bufferSize   in: requested frames; out: actual frames granted
// @param options      optional flags (hog device, minimize latency, realtime
//                     scheduling, noninterleaved)
// @return SUCCESS, or FAILURE with errorText_ set.
//
// NOTE(review): for duplex on the SAME device, OSS requires one fd opened
// O_RDWR, so the playback fd is closed and reopened below — confirm callers
// always open OUTPUT before INPUT for this to engage.
bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                  unsigned int firstChannel, unsigned int sampleRate,
                                  RtAudioFormat format, unsigned int *bufferSize,
                                  RtAudio::StreamOptions *options )
{
  // Query system/device capabilities through the mixer before touching
  // the audio device node itself.
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
    return FAILURE;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
    return FAILURE;
  }

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    close( mixerfd );
    errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
    return FAILURE;
  }

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    close( mixerfd );
    errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
    return FAILURE;
  }

  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Check if device supports input or output.
  if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
       ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
    if ( mode == OUTPUT )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
    else
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Decide the open(2) flags for the device node.
  int flags = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( mode == OUTPUT )
    flags |= O_WRONLY;
  else { // mode == INPUT
    if (stream_.mode == OUTPUT && stream_.device[0] == device) {
      // We just set the same device for playback ... close and reopen for duplex (OSS only).
      close( handle->id[0] );
      handle->id[0] = 0;
      if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
        errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
      // Check that the number of previously set channels is the same.
      if ( stream_.nUserChannels[0] != channels ) {
        errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
      flags |= O_RDWR;
    }
    else
      flags |= O_RDONLY;
  }

  // Set exclusive access if specified.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;

  // Try to open the device.
  int fd;
  fd = open( ainfo.devnode, flags, 0 );
  if ( fd == -1 ) {
    if ( errno == EBUSY )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
    else
      errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // For duplex operation, specifically set this mode (this doesn't seem to work).
  /*
    if ( flags | O_RDWR ) {
    result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
    if ( result == -1) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
    }
    }
  */

  // Check the device channel support.
  stream_.nUserChannels[mode] = channels;
  if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the number of channels.  The device must grant at least
  // channels + firstChannel so the requested range is addressable.
  int deviceChannels = channels + firstChannel;
  result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
  if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.nDeviceChannels[mode] = deviceChannels;

  // Get the data format mask.
  int mask;
  result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
  if ( result == -1 ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine how to set the device format.  Prefer the user's format
  // in native endianness; fall back to the opposite-endian variant with
  // byte swapping enabled.
  stream_.userFormat = format;
  int deviceFormat = -1;
  stream_.doByteSwap[mode] = false;
  if ( format == RTAUDIO_SINT8 ) {
    if ( mask & AFMT_S8 ) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    }
  }
  else if ( format == RTAUDIO_SINT16 ) {
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    }
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true;
    }
  }
  else if ( format == RTAUDIO_SINT24 ) {
    if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    }
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
    }
  }
  else if ( format == RTAUDIO_SINT32 ) {
    if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    }
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;
    }
  }

  if ( deviceFormat == -1 ) {
    // The user requested format is not natively supported by the device.
    // Pick the best available alternative; conversion happens later.
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    }
    else if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    }
    else if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    }
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true;
    }
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;
    }
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
    }
    else if ( mask & AFMT_S8) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    }
  }

  if ( stream_.deviceFormat[mode] == 0 ) {
    // This really shouldn't happen ...
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the data format.
  int temp = deviceFormat;
  result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
  if ( result == -1 || deviceFormat != temp ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Attempt to set the buffer size.  According to OSS, the minimum
  // number of buffers is two.  The supposed minimum buffer size is 16
  // bytes, so that will be our lower bound.  The argument to this
  // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
  // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
  // We'll check the actual value used near the end of the setup
  // procedure.
  int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
  if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
  int buffers = 0;
  if ( options ) buffers = options->numberOfBuffers;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
  if ( buffers < 2 ) buffers = 3;
  // log10(x)/log10(2) == log2(x): the fragment size exponent (SSSS).
  temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
  result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
  if ( result == -1 ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.nBuffers = buffers;

  // Save buffer size (in sample frames).
  *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
  stream_.bufferSize = *bufferSize;

  // Set the sample rate.
  int srate = sampleRate;
  result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
  if ( result == -1 ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Verify the sample rate setup worked (accept up to 100 Hz deviation).
  if ( abs( long( srate - sampleRate ) ) > 100 ) {
    close( fd );
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.sampleRate = sampleRate;

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
    // We're doing duplex setup here: the shared fd was reopened, so the
    // output side inherits this fd's format and channel settings.
    stream_.deviceFormat[0] = stream_.deviceFormat[1];
    stream_.nDeviceChannels[0] = deviceChannels;
  }

  // Set interleaving parameters.  OSS devices are always interleaved.
  stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate the stream handles if necessary and then save.
  if ( stream_.apiHandle == 0 ) {
    try {
      handle = new OssHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
      goto error;
    }

    if ( pthread_cond_init( &handle->runnable, NULL ) ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }

    stream_.apiHandle = (void *) handle;
  }
  else {
    handle = (OssHandle *) stream_.apiHandle;
  }
  handle->id[mode] = fd;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse the output-side device buffer for duplex if it is big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup thread if necessary.
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    if ( stream_.device[0] == device ) handle->id[0] = fd;
  }
  else {
    stream_.mode = mode;

    // Setup callback thread.
    stream_.callbackInfo.object = (void *) this;

    // Set the thread attributes for joinable and realtime scheduling
    // priority.  The higher priority will only take affect if the
    // program is run as root or suid.
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;
      pthread_attr_setschedparam( &attr, &param );
      pthread_attr_setschedpolicy( &attr, SCHED_RR );
    }
    else
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#else
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#endif

    stream_.callbackInfo.isRunning = true;
    result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
    pthread_attr_destroy( &attr );
    if ( result ) {
      stream_.callbackInfo.isRunning = false;
      errorText_ = "RtApiOss::error creating callback thread!";
      goto error;
    }
  }

  return SUCCESS;

 error:
  // Cleanup on failure: release the handle, any open fds, and buffers.
  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}
9189
closeStream()9190 void RtApiOss :: closeStream()
9191 {
9192 if ( stream_.state == STREAM_CLOSED ) {
9193 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9194 error( RtAudioError::WARNING );
9195 return;
9196 }
9197
9198 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9199 stream_.callbackInfo.isRunning = false;
9200 MUTEX_LOCK( &stream_.mutex );
9201 if ( stream_.state == STREAM_STOPPED )
9202 pthread_cond_signal( &handle->runnable );
9203 MUTEX_UNLOCK( &stream_.mutex );
9204 pthread_join( stream_.callbackInfo.thread, NULL );
9205
9206 if ( stream_.state == STREAM_RUNNING ) {
9207 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9208 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9209 else
9210 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9211 stream_.state = STREAM_STOPPED;
9212 }
9213
9214 if ( handle ) {
9215 pthread_cond_destroy( &handle->runnable );
9216 if ( handle->id[0] ) close( handle->id[0] );
9217 if ( handle->id[1] ) close( handle->id[1] );
9218 delete handle;
9219 stream_.apiHandle = 0;
9220 }
9221
9222 for ( int i=0; i<2; i++ ) {
9223 if ( stream_.userBuffer[i] ) {
9224 free( stream_.userBuffer[i] );
9225 stream_.userBuffer[i] = 0;
9226 }
9227 }
9228
9229 if ( stream_.deviceBuffer ) {
9230 free( stream_.deviceBuffer );
9231 stream_.deviceBuffer = 0;
9232 }
9233
9234 stream_.mode = UNINITIALIZED;
9235 stream_.state = STREAM_CLOSED;
9236 }
9237
startStream()9238 void RtApiOss :: startStream()
9239 {
9240 verifyStream();
9241 if ( stream_.state == STREAM_RUNNING ) {
9242 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9243 error( RtAudioError::WARNING );
9244 return;
9245 }
9246
9247 MUTEX_LOCK( &stream_.mutex );
9248
9249 stream_.state = STREAM_RUNNING;
9250
9251 // No need to do anything else here ... OSS automatically starts
9252 // when fed samples.
9253
9254 MUTEX_UNLOCK( &stream_.mutex );
9255
9256 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9257 pthread_cond_signal( &handle->runnable );
9258 }
9259
stopStream()9260 void RtApiOss :: stopStream()
9261 {
9262 verifyStream();
9263 if ( stream_.state == STREAM_STOPPED ) {
9264 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9265 error( RtAudioError::WARNING );
9266 return;
9267 }
9268
9269 MUTEX_LOCK( &stream_.mutex );
9270
9271 // The state might change while waiting on a mutex.
9272 if ( stream_.state == STREAM_STOPPED ) {
9273 MUTEX_UNLOCK( &stream_.mutex );
9274 return;
9275 }
9276
9277 int result = 0;
9278 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9279 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9280
9281 // Flush the output with zeros a few times.
9282 char *buffer;
9283 int samples;
9284 RtAudioFormat format;
9285
9286 if ( stream_.doConvertBuffer[0] ) {
9287 buffer = stream_.deviceBuffer;
9288 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9289 format = stream_.deviceFormat[0];
9290 }
9291 else {
9292 buffer = stream_.userBuffer[0];
9293 samples = stream_.bufferSize * stream_.nUserChannels[0];
9294 format = stream_.userFormat;
9295 }
9296
9297 memset( buffer, 0, samples * formatBytes(format) );
9298 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9299 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9300 if ( result == -1 ) {
9301 errorText_ = "RtApiOss::stopStream: audio write error.";
9302 error( RtAudioError::WARNING );
9303 }
9304 }
9305
9306 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9307 if ( result == -1 ) {
9308 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9309 errorText_ = errorStream_.str();
9310 goto unlock;
9311 }
9312 handle->triggered = false;
9313 }
9314
9315 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9316 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9317 if ( result == -1 ) {
9318 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9319 errorText_ = errorStream_.str();
9320 goto unlock;
9321 }
9322 }
9323
9324 unlock:
9325 stream_.state = STREAM_STOPPED;
9326 MUTEX_UNLOCK( &stream_.mutex );
9327
9328 if ( result != -1 ) return;
9329 error( RtAudioError::SYSTEM_ERROR );
9330 }
9331
abortStream()9332 void RtApiOss :: abortStream()
9333 {
9334 verifyStream();
9335 if ( stream_.state == STREAM_STOPPED ) {
9336 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9337 error( RtAudioError::WARNING );
9338 return;
9339 }
9340
9341 MUTEX_LOCK( &stream_.mutex );
9342
9343 // The state might change while waiting on a mutex.
9344 if ( stream_.state == STREAM_STOPPED ) {
9345 MUTEX_UNLOCK( &stream_.mutex );
9346 return;
9347 }
9348
9349 int result = 0;
9350 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9351 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9352 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9353 if ( result == -1 ) {
9354 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9355 errorText_ = errorStream_.str();
9356 goto unlock;
9357 }
9358 handle->triggered = false;
9359 }
9360
9361 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9362 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9363 if ( result == -1 ) {
9364 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9365 errorText_ = errorStream_.str();
9366 goto unlock;
9367 }
9368 }
9369
9370 unlock:
9371 stream_.state = STREAM_STOPPED;
9372 MUTEX_UNLOCK( &stream_.mutex );
9373
9374 if ( result != -1 ) return;
9375 error( RtAudioError::SYSTEM_ERROR );
9376 }
9377
// One iteration of the audio callback: wait while stopped, invoke the
// user callback, then write output to and/or read input from the OSS
// device, performing byte swapping and format conversion as configured.
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream()/closeStream() signals us.  A spurious
    // wakeup while still stopped simply returns; the thread loop in
    // ossCallbackHandler() re-enters and waits again.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.  Over/underflow
  // flags set by previous iterations are reported once, then cleared.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex write: disable the trigger, prime the output with
      // one buffer, then enable input+output together so both
      // directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // Callback return value 1 requests a graceful stop (with drain).
  if ( doStopStream == 1 ) this->stopStream();
}
9509
ossCallbackHandler(void * ptr)9510 static void *ossCallbackHandler( void *ptr )
9511 {
9512 CallbackInfo *info = (CallbackInfo *) ptr;
9513 RtApiOss *object = (RtApiOss *) info->object;
9514 bool *isRunning = &info->isRunning;
9515
9516 while ( *isRunning == true ) {
9517 pthread_testcancel();
9518 object->callbackEvent();
9519 }
9520
9521 pthread_exit( NULL );
9522 }
9523
9524 //******************** End of __LINUX_OSS__ *********************//
9525 #endif
9526
9527
9528 // *************************************************** //
9529 //
9530 // Protected common (OS-independent) RtAudio methods.
9531 //
9532 // *************************************************** //
9533
9534 // This method can be modified to control the behavior of error
9535 // message printing.
// Central error dispatcher: routes the message in errorText_ either to a
// user-registered error callback or to stderr/exception handling.
// NOTE(review): this runs re-entrantly — abortStream() below can itself
// call error() — which is what the firstErrorOccurred_ guard is for.
void RtApi :: error( RtAudioError::Type type )
{
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Re-entrant call (from abortStream() below): drop the nested error.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now — nested error() calls may overwrite errorText_.
    const std::string errorMessage = errorText_;

    // A non-warning error on a live stream shuts the stream down before
    // the user callback is notified.
    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  // No callback registered: print warnings (if enabled), throw on real errors.
  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9565
verifyStream()9566 void RtApi :: verifyStream()
9567 {
9568 if ( stream_.state == STREAM_CLOSED ) {
9569 errorText_ = "RtApi:: a stream is not open!";
9570 error( RtAudioError::INVALID_USE );
9571 }
9572 }
9573
clearStreamInfo()9574 void RtApi :: clearStreamInfo()
9575 {
9576 stream_.mode = UNINITIALIZED;
9577 stream_.state = STREAM_CLOSED;
9578 stream_.sampleRate = 0;
9579 stream_.bufferSize = 0;
9580 stream_.nBuffers = 0;
9581 stream_.userFormat = 0;
9582 stream_.userInterleaved = true;
9583 stream_.streamTime = 0.0;
9584 stream_.apiHandle = 0;
9585 stream_.deviceBuffer = 0;
9586 stream_.callbackInfo.callback = 0;
9587 stream_.callbackInfo.userData = 0;
9588 stream_.callbackInfo.isRunning = false;
9589 stream_.callbackInfo.errorCallback = 0;
9590 for ( int i=0; i<2; i++ ) {
9591 stream_.device[i] = 11111;
9592 stream_.doConvertBuffer[i] = false;
9593 stream_.deviceInterleaved[i] = true;
9594 stream_.doByteSwap[i] = false;
9595 stream_.nUserChannels[i] = 0;
9596 stream_.nDeviceChannels[i] = 0;
9597 stream_.channelOffset[i] = 0;
9598 stream_.deviceFormat[i] = 0;
9599 stream_.latency[i] = 0;
9600 stream_.userBuffer[i] = 0;
9601 stream_.convertInfo[i].channels = 0;
9602 stream_.convertInfo[i].inJump = 0;
9603 stream_.convertInfo[i].outJump = 0;
9604 stream_.convertInfo[i].inFormat = 0;
9605 stream_.convertInfo[i].outFormat = 0;
9606 stream_.convertInfo[i].inOffset.clear();
9607 stream_.convertInfo[i].outOffset.clear();
9608 }
9609 }
9610
formatBytes(RtAudioFormat format)9611 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9612 {
9613 if ( format == RTAUDIO_SINT16 )
9614 return 2;
9615 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9616 return 4;
9617 else if ( format == RTAUDIO_FLOAT64 )
9618 return 8;
9619 else if ( format == RTAUDIO_SINT24 )
9620 return 3;
9621 else if ( format == RTAUDIO_SINT8 )
9622 return 1;
9623
9624 errorText_ = "RtApi::formatBytes: undefined format.";
9625 error( RtAudioError::WARNING );
9626
9627 return 0;
9628 }
9629
// Populate stream_.convertInfo[mode] for convertBuffer(): per-frame jumps,
// sample formats, the channel count to convert, and per-channel offset
// tables that implement (de)interleaving and the firstChannel shift.
// Note: mode is used directly as the array index (OUTPUT==0, INPUT==1).
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have (the minimum).
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    // One side is interleaved, the other planar.  The non-interleaved
    // side strides by bufferSize per channel; its jump collapses to 1.
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides planar: stride by bufferSize, jump collapses to 1.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // The shift lands on whichever offset table addresses the device buffer:
  // out for OUTPUT, in for INPUT.  Interleaved devices shift by channel
  // index; planar devices shift by whole channel planes (bufferSize).
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
9709
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)9710 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9711 {
9712 // This function does format conversion, input/output channel compensation, and
9713 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9714 // the lower three bytes of a 32-bit integer.
9715
9716 // Clear our device buffer when in/out duplex device channels are different
9717 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9718 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9719 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9720
9721 int j;
9722 if (info.outFormat == RTAUDIO_FLOAT64) {
9723 Float64 scale;
9724 Float64 *out = (Float64 *)outBuffer;
9725
9726 if (info.inFormat == RTAUDIO_SINT8) {
9727 signed char *in = (signed char *)inBuffer;
9728 scale = 1.0 / 127.5;
9729 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9730 for (j=0; j<info.channels; j++) {
9731 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9732 out[info.outOffset[j]] += 0.5;
9733 out[info.outOffset[j]] *= scale;
9734 }
9735 in += info.inJump;
9736 out += info.outJump;
9737 }
9738 }
9739 else if (info.inFormat == RTAUDIO_SINT16) {
9740 Int16 *in = (Int16 *)inBuffer;
9741 scale = 1.0 / 32767.5;
9742 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9743 for (j=0; j<info.channels; j++) {
9744 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9745 out[info.outOffset[j]] += 0.5;
9746 out[info.outOffset[j]] *= scale;
9747 }
9748 in += info.inJump;
9749 out += info.outJump;
9750 }
9751 }
9752 else if (info.inFormat == RTAUDIO_SINT24) {
9753 Int24 *in = (Int24 *)inBuffer;
9754 scale = 1.0 / 8388607.5;
9755 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9756 for (j=0; j<info.channels; j++) {
9757 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9758 out[info.outOffset[j]] += 0.5;
9759 out[info.outOffset[j]] *= scale;
9760 }
9761 in += info.inJump;
9762 out += info.outJump;
9763 }
9764 }
9765 else if (info.inFormat == RTAUDIO_SINT32) {
9766 Int32 *in = (Int32 *)inBuffer;
9767 scale = 1.0 / 2147483647.5;
9768 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9769 for (j=0; j<info.channels; j++) {
9770 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9771 out[info.outOffset[j]] += 0.5;
9772 out[info.outOffset[j]] *= scale;
9773 }
9774 in += info.inJump;
9775 out += info.outJump;
9776 }
9777 }
9778 else if (info.inFormat == RTAUDIO_FLOAT32) {
9779 Float32 *in = (Float32 *)inBuffer;
9780 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9781 for (j=0; j<info.channels; j++) {
9782 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9783 }
9784 in += info.inJump;
9785 out += info.outJump;
9786 }
9787 }
9788 else if (info.inFormat == RTAUDIO_FLOAT64) {
9789 // Channel compensation and/or (de)interleaving only.
9790 Float64 *in = (Float64 *)inBuffer;
9791 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9792 for (j=0; j<info.channels; j++) {
9793 out[info.outOffset[j]] = in[info.inOffset[j]];
9794 }
9795 in += info.inJump;
9796 out += info.outJump;
9797 }
9798 }
9799 }
9800 else if (info.outFormat == RTAUDIO_FLOAT32) {
9801 Float32 scale;
9802 Float32 *out = (Float32 *)outBuffer;
9803
9804 if (info.inFormat == RTAUDIO_SINT8) {
9805 signed char *in = (signed char *)inBuffer;
9806 scale = (Float32) ( 1.0 / 127.5 );
9807 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9808 for (j=0; j<info.channels; j++) {
9809 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9810 out[info.outOffset[j]] += 0.5;
9811 out[info.outOffset[j]] *= scale;
9812 }
9813 in += info.inJump;
9814 out += info.outJump;
9815 }
9816 }
9817 else if (info.inFormat == RTAUDIO_SINT16) {
9818 Int16 *in = (Int16 *)inBuffer;
9819 scale = (Float32) ( 1.0 / 32767.5 );
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9821 for (j=0; j<info.channels; j++) {
9822 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9823 out[info.outOffset[j]] += 0.5;
9824 out[info.outOffset[j]] *= scale;
9825 }
9826 in += info.inJump;
9827 out += info.outJump;
9828 }
9829 }
9830 else if (info.inFormat == RTAUDIO_SINT24) {
9831 Int24 *in = (Int24 *)inBuffer;
9832 scale = (Float32) ( 1.0 / 8388607.5 );
9833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9834 for (j=0; j<info.channels; j++) {
9835 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9836 out[info.outOffset[j]] += 0.5;
9837 out[info.outOffset[j]] *= scale;
9838 }
9839 in += info.inJump;
9840 out += info.outJump;
9841 }
9842 }
9843 else if (info.inFormat == RTAUDIO_SINT32) {
9844 Int32 *in = (Int32 *)inBuffer;
9845 scale = (Float32) ( 1.0 / 2147483647.5 );
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9847 for (j=0; j<info.channels; j++) {
9848 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9849 out[info.outOffset[j]] += 0.5;
9850 out[info.outOffset[j]] *= scale;
9851 }
9852 in += info.inJump;
9853 out += info.outJump;
9854 }
9855 }
9856 else if (info.inFormat == RTAUDIO_FLOAT32) {
9857 // Channel compensation and/or (de)interleaving only.
9858 Float32 *in = (Float32 *)inBuffer;
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9860 for (j=0; j<info.channels; j++) {
9861 out[info.outOffset[j]] = in[info.inOffset[j]];
9862 }
9863 in += info.inJump;
9864 out += info.outJump;
9865 }
9866 }
9867 else if (info.inFormat == RTAUDIO_FLOAT64) {
9868 Float64 *in = (Float64 *)inBuffer;
9869 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9870 for (j=0; j<info.channels; j++) {
9871 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9872 }
9873 in += info.inJump;
9874 out += info.outJump;
9875 }
9876 }
9877 }
9878 else if (info.outFormat == RTAUDIO_SINT32) {
9879 Int32 *out = (Int32 *)outBuffer;
9880 if (info.inFormat == RTAUDIO_SINT8) {
9881 signed char *in = (signed char *)inBuffer;
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9883 for (j=0; j<info.channels; j++) {
9884 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9885 out[info.outOffset[j]] <<= 24;
9886 }
9887 in += info.inJump;
9888 out += info.outJump;
9889 }
9890 }
9891 else if (info.inFormat == RTAUDIO_SINT16) {
9892 Int16 *in = (Int16 *)inBuffer;
9893 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9894 for (j=0; j<info.channels; j++) {
9895 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9896 out[info.outOffset[j]] <<= 16;
9897 }
9898 in += info.inJump;
9899 out += info.outJump;
9900 }
9901 }
9902 else if (info.inFormat == RTAUDIO_SINT24) {
9903 Int24 *in = (Int24 *)inBuffer;
9904 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9905 for (j=0; j<info.channels; j++) {
9906 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9907 out[info.outOffset[j]] <<= 8;
9908 }
9909 in += info.inJump;
9910 out += info.outJump;
9911 }
9912 }
9913 else if (info.inFormat == RTAUDIO_SINT32) {
9914 // Channel compensation and/or (de)interleaving only.
9915 Int32 *in = (Int32 *)inBuffer;
9916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9917 for (j=0; j<info.channels; j++) {
9918 out[info.outOffset[j]] = in[info.inOffset[j]];
9919 }
9920 in += info.inJump;
9921 out += info.outJump;
9922 }
9923 }
9924 else if (info.inFormat == RTAUDIO_FLOAT32) {
9925 Float32 *in = (Float32 *)inBuffer;
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9927 for (j=0; j<info.channels; j++) {
9928 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9929 }
9930 in += info.inJump;
9931 out += info.outJump;
9932 }
9933 }
9934 else if (info.inFormat == RTAUDIO_FLOAT64) {
9935 Float64 *in = (Float64 *)inBuffer;
9936 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9937 for (j=0; j<info.channels; j++) {
9938 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9939 }
9940 in += info.inJump;
9941 out += info.outJump;
9942 }
9943 }
9944 }
9945 else if (info.outFormat == RTAUDIO_SINT24) {
9946 Int24 *out = (Int24 *)outBuffer;
9947 if (info.inFormat == RTAUDIO_SINT8) {
9948 signed char *in = (signed char *)inBuffer;
9949 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9950 for (j=0; j<info.channels; j++) {
9951 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9952 //out[info.outOffset[j]] <<= 16;
9953 }
9954 in += info.inJump;
9955 out += info.outJump;
9956 }
9957 }
9958 else if (info.inFormat == RTAUDIO_SINT16) {
9959 Int16 *in = (Int16 *)inBuffer;
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9961 for (j=0; j<info.channels; j++) {
9962 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9963 //out[info.outOffset[j]] <<= 8;
9964 }
9965 in += info.inJump;
9966 out += info.outJump;
9967 }
9968 }
9969 else if (info.inFormat == RTAUDIO_SINT24) {
9970 // Channel compensation and/or (de)interleaving only.
9971 Int24 *in = (Int24 *)inBuffer;
9972 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9973 for (j=0; j<info.channels; j++) {
9974 out[info.outOffset[j]] = in[info.inOffset[j]];
9975 }
9976 in += info.inJump;
9977 out += info.outJump;
9978 }
9979 }
9980 else if (info.inFormat == RTAUDIO_SINT32) {
9981 Int32 *in = (Int32 *)inBuffer;
9982 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9983 for (j=0; j<info.channels; j++) {
9984 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9985 //out[info.outOffset[j]] >>= 8;
9986 }
9987 in += info.inJump;
9988 out += info.outJump;
9989 }
9990 }
9991 else if (info.inFormat == RTAUDIO_FLOAT32) {
9992 Float32 *in = (Float32 *)inBuffer;
9993 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9994 for (j=0; j<info.channels; j++) {
9995 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9996 }
9997 in += info.inJump;
9998 out += info.outJump;
9999 }
10000 }
10001 else if (info.inFormat == RTAUDIO_FLOAT64) {
10002 Float64 *in = (Float64 *)inBuffer;
10003 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10004 for (j=0; j<info.channels; j++) {
10005 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10006 }
10007 in += info.inJump;
10008 out += info.outJump;
10009 }
10010 }
10011 }
10012 else if (info.outFormat == RTAUDIO_SINT16) {
10013 Int16 *out = (Int16 *)outBuffer;
10014 if (info.inFormat == RTAUDIO_SINT8) {
10015 signed char *in = (signed char *)inBuffer;
10016 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10017 for (j=0; j<info.channels; j++) {
10018 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10019 out[info.outOffset[j]] <<= 8;
10020 }
10021 in += info.inJump;
10022 out += info.outJump;
10023 }
10024 }
10025 else if (info.inFormat == RTAUDIO_SINT16) {
10026 // Channel compensation and/or (de)interleaving only.
10027 Int16 *in = (Int16 *)inBuffer;
10028 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10029 for (j=0; j<info.channels; j++) {
10030 out[info.outOffset[j]] = in[info.inOffset[j]];
10031 }
10032 in += info.inJump;
10033 out += info.outJump;
10034 }
10035 }
10036 else if (info.inFormat == RTAUDIO_SINT24) {
10037 Int24 *in = (Int24 *)inBuffer;
10038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10039 for (j=0; j<info.channels; j++) {
10040 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10041 }
10042 in += info.inJump;
10043 out += info.outJump;
10044 }
10045 }
10046 else if (info.inFormat == RTAUDIO_SINT32) {
10047 Int32 *in = (Int32 *)inBuffer;
10048 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10049 for (j=0; j<info.channels; j++) {
10050 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10051 }
10052 in += info.inJump;
10053 out += info.outJump;
10054 }
10055 }
10056 else if (info.inFormat == RTAUDIO_FLOAT32) {
10057 Float32 *in = (Float32 *)inBuffer;
10058 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10059 for (j=0; j<info.channels; j++) {
10060 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10061 }
10062 in += info.inJump;
10063 out += info.outJump;
10064 }
10065 }
10066 else if (info.inFormat == RTAUDIO_FLOAT64) {
10067 Float64 *in = (Float64 *)inBuffer;
10068 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10069 for (j=0; j<info.channels; j++) {
10070 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10071 }
10072 in += info.inJump;
10073 out += info.outJump;
10074 }
10075 }
10076 }
10077 else if (info.outFormat == RTAUDIO_SINT8) {
10078 signed char *out = (signed char *)outBuffer;
10079 if (info.inFormat == RTAUDIO_SINT8) {
10080 // Channel compensation and/or (de)interleaving only.
10081 signed char *in = (signed char *)inBuffer;
10082 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10083 for (j=0; j<info.channels; j++) {
10084 out[info.outOffset[j]] = in[info.inOffset[j]];
10085 }
10086 in += info.inJump;
10087 out += info.outJump;
10088 }
10089 }
10090 if (info.inFormat == RTAUDIO_SINT16) {
10091 Int16 *in = (Int16 *)inBuffer;
10092 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10093 for (j=0; j<info.channels; j++) {
10094 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10095 }
10096 in += info.inJump;
10097 out += info.outJump;
10098 }
10099 }
10100 else if (info.inFormat == RTAUDIO_SINT24) {
10101 Int24 *in = (Int24 *)inBuffer;
10102 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10103 for (j=0; j<info.channels; j++) {
10104 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10105 }
10106 in += info.inJump;
10107 out += info.outJump;
10108 }
10109 }
10110 else if (info.inFormat == RTAUDIO_SINT32) {
10111 Int32 *in = (Int32 *)inBuffer;
10112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10113 for (j=0; j<info.channels; j++) {
10114 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10115 }
10116 in += info.inJump;
10117 out += info.outJump;
10118 }
10119 }
10120 else if (info.inFormat == RTAUDIO_FLOAT32) {
10121 Float32 *in = (Float32 *)inBuffer;
10122 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10123 for (j=0; j<info.channels; j++) {
10124 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10125 }
10126 in += info.inJump;
10127 out += info.outJump;
10128 }
10129 }
10130 else if (info.inFormat == RTAUDIO_FLOAT64) {
10131 Float64 *in = (Float64 *)inBuffer;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10135 }
10136 in += info.inJump;
10137 out += info.outJump;
10138 }
10139 }
10140 }
10141 }
10142
10143 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10144 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10145 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10146
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10147 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10148 {
10149 char val;
10150 char *ptr;
10151
10152 ptr = buffer;
10153 if ( format == RTAUDIO_SINT16 ) {
10154 for ( unsigned int i=0; i<samples; i++ ) {
10155 // Swap 1st and 2nd bytes.
10156 val = *(ptr);
10157 *(ptr) = *(ptr+1);
10158 *(ptr+1) = val;
10159
10160 // Increment 2 bytes.
10161 ptr += 2;
10162 }
10163 }
10164 else if ( format == RTAUDIO_SINT32 ||
10165 format == RTAUDIO_FLOAT32 ) {
10166 for ( unsigned int i=0; i<samples; i++ ) {
10167 // Swap 1st and 4th bytes.
10168 val = *(ptr);
10169 *(ptr) = *(ptr+3);
10170 *(ptr+3) = val;
10171
10172 // Swap 2nd and 3rd bytes.
10173 ptr += 1;
10174 val = *(ptr);
10175 *(ptr) = *(ptr+1);
10176 *(ptr+1) = val;
10177
10178 // Increment 3 more bytes.
10179 ptr += 3;
10180 }
10181 }
10182 else if ( format == RTAUDIO_SINT24 ) {
10183 for ( unsigned int i=0; i<samples; i++ ) {
10184 // Swap 1st and 3rd bytes.
10185 val = *(ptr);
10186 *(ptr) = *(ptr+2);
10187 *(ptr+2) = val;
10188
10189 // Increment 2 more bytes.
10190 ptr += 2;
10191 }
10192 }
10193 else if ( format == RTAUDIO_FLOAT64 ) {
10194 for ( unsigned int i=0; i<samples; i++ ) {
10195 // Swap 1st and 8th bytes
10196 val = *(ptr);
10197 *(ptr) = *(ptr+7);
10198 *(ptr+7) = val;
10199
10200 // Swap 2nd and 7th bytes
10201 ptr += 1;
10202 val = *(ptr);
10203 *(ptr) = *(ptr+5);
10204 *(ptr+5) = val;
10205
10206 // Swap 3rd and 6th bytes
10207 ptr += 1;
10208 val = *(ptr);
10209 *(ptr) = *(ptr+3);
10210 *(ptr+3) = val;
10211
10212 // Swap 4th and 5th bytes
10213 ptr += 1;
10214 val = *(ptr);
10215 *(ptr) = *(ptr+1);
10216 *(ptr+1) = val;
10217
10218 // Increment 5 more bytes.
10219 ptr += 5;
10220 }
10221 }
10222 }
10223
10224 // Indentation settings for Vim and Emacs
10225 //
10226 // Local Variables:
10227 // c-basic-offset: 2
10228 // indent-tabs-mode: nil
10229 // End:
10230 //
10231 // vim: et sts=2 sw=2
10232
10233