1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
14
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
22
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
25
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
30
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39 /************************************************************************/
40
41 // RtAudio: Version 5.0.0
42
#include "RtAudio.h"
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
50
// Static variable definitions.
// Candidate sample rates probed when a backend reports a continuous
// range of supported rates rather than a discrete list.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;  // must equal the element count of SAMPLE_RATES below
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
57
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows backends: map the portable MUTEX_* macros onto the Win32
  // critical-section API (stream_.mutex is a CRITICAL_SECTION there).
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Needed for the wide/narrow string helpers below.
  #include "tchar.h"
// Narrow-string overload: a plain copy into std::string. Provided so
// device-name handling code can be written generically for both char
// and wchar_t sources.
static std::string convertCharPointerToStdString( const char *text )
{
  std::string converted( text );
  return converted;
}
70
convertCharPointerToStdString(const wchar_t * text)71 static std::string convertCharPointerToStdString(const wchar_t *text)
72 {
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
76 return s;
77 }
78
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  // POSIX backends: stream_.mutex is a pthread_mutex_t.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // No real backend compiled in (dummy build): expand to a harmless
  // expression so call sites still compile. Only INITIALIZE/DESTROY
  // are needed by the dummy API; LOCK/UNLOCK are never referenced.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
89
90 // *************************************************** //
91 //
92 // RtAudio definitions.
93 //
94 // *************************************************** //
95
getVersion(void)96 std::string RtAudio :: getVersion( void )
97 {
98 return RTAUDIO_VERSION;
99 }
100
getCompiledApi(std::vector<RtAudio::Api> & apis)101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
102 {
103 apis.clear();
104
105 // The order here will control the order of RtAudio's API search in
106 // the constructor.
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
109 #endif
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
112 #endif
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
115 #endif
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
118 #endif
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
121 #endif
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
124 #endif
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
127 #endif
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
130 #endif
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
133 #endif
134 }
135
openRtApi(RtAudio::Api api)136 void RtAudio :: openRtApi( RtAudio::Api api )
137 {
138 if ( rtapi_ )
139 delete rtapi_;
140 rtapi_ = 0;
141
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
145 #endif
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
149 #endif
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
153 #endif
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
157 #endif
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
161 #endif
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
165 #endif
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
169 #endif
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
173 #endif
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
177 #endif
178 }
179
RtAudio(RtAudio::Api api)180 RtAudio :: RtAudio( RtAudio::Api api )
181 {
182 rtapi_ = 0;
183
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
186 openRtApi( api );
187 if ( rtapi_ ) return;
188
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
192 }
193
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
201 }
202
203 if ( rtapi_ ) return;
204
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
211 }
212
~RtAudio()213 RtAudio :: ~RtAudio()
214 {
215 if ( rtapi_ )
216 delete rtapi_;
217 }
218
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioErrorCallback errorCallback)219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
226 {
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
230 }
231
232 // *************************************************** //
233 //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
236 //
237 // *************************************************** //
238
RtApi()239 RtApi :: RtApi()
240 {
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
249 }
250
RtApi :: ~RtApi()
{
  // Release the stream mutex created in the constructor. Subclass
  // destructors are responsible for closing any open stream first.
  MUTEX_DESTROY( &stream_.mutex );
}
255
// Validate the caller's stream request, then delegate the API-specific
// work to probeDeviceOpen() for each requested direction. On any
// failure the error() mechanism is invoked (message in errorText_) and
// the stream remains closed. The order of the checks below determines
// which error a caller sees first, so it is part of the contract.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Device indices are validated against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callback/error callback as void* in the shared
  // CallbackInfo structure (cast back at invocation time).
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the buffer count actually chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
348
// Default-device query stub: returns device index 0. Backends that
// can identify a system default override this.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
354
// Default-device query stub: returns device index 0. Backends that
// can identify a system default override this.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
360
// Base-class placeholder: every backend provides its own stream
// teardown.
void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}
366
// Base-class placeholder: always reports failure. Each backend
// overrides this to configure the device and allocate stream buffers.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
375
tickStreamTime(void)376 void RtApi :: tickStreamTime( void )
377 {
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
381
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
383
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
386 #endif
387 }
388
getStreamLatency(void)389 long RtApi :: getStreamLatency( void )
390 {
391 verifyStream();
392
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
398
399 return totalLatency;
400 }
401
getStreamTime(void)402 double RtApi :: getStreamTime( void )
403 {
404 verifyStream();
405
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
409 struct timeval then;
410 struct timeval now;
411
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
414
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
420 #else
421 return stream_.streamTime;
422 #endif
423 }
424
// Reset the stream time to 'time'. Negative values are silently
// ignored (the comparison also rejects NaN). The tick timestamp is
// refreshed regardless so subsequent getStreamTime() interpolation
// starts from now.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
435
// Return the sample rate of the currently open stream; verifyStream()
// raises an error if no stream is open.
unsigned int RtApi :: getStreamSampleRate( void )
{
 verifyStream();

 return stream_.sampleRate;
}
442
443
444 // *************************************************** //
445 //
446 // OS/API-specific methods.
447 //
448 // *************************************************** //
449
450 #if defined(__MACOSX_CORE__)
451
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
457 //
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
466
// A structure to hold various information related to the CoreAudio API
// implementation.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids (presumably [0] = output, [1] = input, matching xrun[] — confirm against stream setup)
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];  // IOProc ids (10.5+ registration API)
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags set by xrunListener ([1] = input scope)
  char *deviceBuffer;     // intermediate buffer for format/channel conversion
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  // Default-construct with one stream per direction and no xruns.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
485
// CoreAudio backend constructor. On 10.6+ the hardware run loop must
// be explicitly configured or property queries/updates misbehave.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  // Passing a NULL run loop tells CoreAudio to use its own thread.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    // Non-fatal: report as a warning and continue.
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
504
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
512
getDeviceCount(void)513 unsigned int RtApiCore :: getDeviceCount( void )
514 {
515 // Find out how many audio devices there are, if any.
516 UInt32 dataSize;
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
522 return 0;
523 }
524
525 return dataSize / sizeof( AudioDeviceID );
526 }
527
getDefaultInputDevice(void)528 unsigned int RtApiCore :: getDefaultInputDevice( void )
529 {
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
532
533 AudioDeviceID id;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
540 return 0;
541 }
542
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
550 return 0;
551 }
552
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
555
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
558 return 0;
559 }
560
getDefaultOutputDevice(void)561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
562 {
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
565
566 AudioDeviceID id;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
573 return 0;
574 }
575
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
583 return 0;
584 }
585
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
588
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
591 return 0;
592 }
593
// Probe a single CoreAudio device: name, channel counts per
// direction, supported sample rates, and default-device status.
// On any failure a WARNING is raised and 'info' is returned with
// info.probed == false.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Map our index to a CoreAudio AudioDeviceID via the device list.
  // NOTE(review): variable-length arrays are a compiler extension,
  // not standard C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.
  // The reported name is "<manufacturer>: <device name>". Each
  // CFString is converted to a char buffer sized for worst-case
  // UTF-8 expansion (3 bytes per UTF-16 code unit, plus terminator).
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  // Channel counts are summed over all of the device's streams.
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest discrete rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // For a continuous range, offer every standard rate that falls
  // inside the most conservative (intersected) range.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
834
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
841 void* infoPointer )
842 {
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
844
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
848 else
849 return kAudioHardwareNoError;
850 }
851
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
853 UInt32 nAddresses,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
856 {
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
862 else
863 handle->xrun[0] = true;
864 }
865 }
866
867 return kAudioHardwareNoError;
868 }
869
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
873 void* ratePointer )
874 {
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
882 }
883
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
888 {
889 // Get device ID
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
894 return FAILURE;
895 }
896
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
900 return FAILURE;
901 }
902
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
912 return FAILURE;
913 }
914
915 AudioDeviceID id = deviceList[ device ];
916
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
920 isInput = true;
921 property.mScope = kAudioDevicePropertyScopeInput;
922 }
923 else
924 property.mScope = kAudioDevicePropertyScopeOutput;
925
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
928 dataSize = 0;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
934 return FAILURE;
935 }
936
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
941 return FAILURE;
942 }
943
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
946 free( bufferList );
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
949 return FAILURE;
950 }
951
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
964
965 // First check that the device supports the requested number of
966 // channels.
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
970
971 if ( deviceChannels < ( channels + firstChannel ) ) {
972 free( bufferList );
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
975 return FAILURE;
976 }
977
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
985 foundStream = true;
986 break;
987 }
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
990 }
991
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
995 monoMode = true;
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1001 }
1002
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1006
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1012 streamCount++;
1013 }
1014 }
1015
1016 free( bufferList );
1017
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1023
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1027 return FAILURE;
1028 }
1029
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1033
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1040
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1044 return FAILURE;
1045 }
1046
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1053 return FAILURE;
1054 }
1055
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1058
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1061 pid_t hog_pid;
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1068 return FAILURE;
1069 }
1070
1071 if ( hog_pid != getpid() ) {
1072 hog_pid = getpid();
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1077 return FAILURE;
1078 }
1079 }
1080 }
1081
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1090 return FAILURE;
1091 }
1092
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1095
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1103 return FAILURE;
1104 }
1105
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1112 return FAILURE;
1113 }
1114
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1120 usleep( 5000 );
1121 }
1122
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1125
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1129 return FAILURE;
1130 }
1131 }
1132
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1142 return FAILURE;
1143 }
1144
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1152 }
1153
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1157 }
1158
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1164 return FAILURE;
1165 }
1166 }
1167
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1174 return FAILURE;
1175 }
1176
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1182
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1187 UInt32 formatFlags;
1188
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1203
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1211 else
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1222 break;
1223 }
1224 }
1225
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1229 return FAILURE;
1230 }
1231 } // done setting virtual/physical formats.
1232
1233 // Get the stream / device latency.
1234 UInt32 latency;
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1240 else {
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1244 }
1245 }
1246
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1251
1252 // From the CoreAudio documentation, PCM data must be supplied as
1253 // 32-bit floats.
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1256
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1267
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1278 }
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1281
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1285 try {
1286 handle = new CoreHandle;
1287 }
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1290 goto error;
1291 }
1292
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1295 goto error;
1296 }
1297 stream_.apiHandle = (void *) handle;
1298 }
1299 else
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1304
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1313 goto error;
1314 }
1315
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1318 // streams.
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1320
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1327 }
1328 }
1329
1330 if ( makeBuffer ) {
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1336 goto error;
1337 }
1338 }
1339 }
1340
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1345
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1350 }
1351
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1355 else {
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1358 #else
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1361 #endif
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1365 goto error;
1366 }
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1369 else
1370 stream_.mode = mode;
1371 }
1372
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1377
1378 return SUCCESS;
1379
1380 error:
1381 if ( handle ) {
1382 pthread_cond_destroy( &handle->condition );
1383 delete handle;
1384 stream_.apiHandle = 0;
1385 }
1386
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1391 }
1392 }
1393
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1397 }
1398
1399 stream_.state = STREAM_CLOSED;
1400 return FAILURE;
1401 }
1402
void RtApiCore :: closeStream( void )
{
  // Close an open stream: remove the processor-overload (xrun)
  // listeners, stop the device(s) if still running, dispose of the
  // I/O procs, then free all buffers and the CoreHandle.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Output side (also the output half of a duplex stream).
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }
    }
    // NOTE(review): handle is dereferenced below without the NULL guard
    // used above — confirm apiHandle can never be NULL for an open stream.
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // Input side: skipped for a duplex stream on a single device, since
  // that device shares the output side's callback and listener.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }
    }
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  // Release the user buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
1478
startStream(void)1479 void RtApiCore :: startStream( void )
1480 {
1481 verifyStream();
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1485 return;
1486 }
1487
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1491
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1496 goto unlock;
1497 }
1498 }
1499
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
1507 goto unlock;
1508 }
1509 }
1510
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1514
1515 unlock:
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1518 }
1519
void RtApiCore :: stopStream( void )
{
  // Stop a running stream.  For output, pending data is drained first:
  // drainCounter is set and this thread blocks on the condition
  // variable until the audio callback signals that the output has
  // played out; the device(s) are then stopped.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain/abort was requested yet (a prior
    // abortStream() sets it to 2, skipping the wait below).
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): pthread_cond_wait() requires stream_.mutex to be
      // locked by the caller; it is not locked here — confirm against
      // the callback's signaling path.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input device separately only when it differs from the
  // output device (a single duplex device uses one callback).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1562
abortStream(void)1563 void RtApiCore :: abortStream( void )
1564 {
1565 verifyStream();
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1569 return;
1570 }
1571
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1574
1575 stopStream();
1576 }
1577
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
coreStopStream(void * ptr)1583 static void *coreStopStream( void *ptr )
1584 {
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1587
1588 object->stopStream();
1589 pthread_exit( NULL );
1590 }
1591
// Services one CoreAudio i/o cycle for the device identified by deviceId:
// invokes the user callback to produce/consume audio, copies and
// de/interleaves data between the user buffers and the CoreAudio stream
// buffer(s), and drives the drain/stop handshake. Returns SUCCESS on a
// normal cycle (and advances the stream time); FAILURE only if called on
// a closed stream. In duplex mode with distinct input/output devices this
// function is entered once per device per cycle.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // drainCounter is incremented once per cycle while draining (see the
  // "Don't bother draining input" block below); > 3 means enough silent
  // buffers have been emitted that the stream can actually be stopped.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Stop initiated from the user callback: stopStream() must run on a
      // separate thread so this callback can return first.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    // Report (and clear) any over/underflow flags set by the xrun listener.
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return value 2 = abort immediately; 1 = drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One single-channel CoreAudio stream per user channel.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the distance between successive channels of one frame
        // in the source buffer: 1 when interleaved, bufferSize otherwise.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE: this inner loop index intentionally shadows the stream
          // loop index above; the outer `i` is not used inside.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One single-channel CoreAudio stream per user channel.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset is the distance between successive channels of one frame
        // in the destination buffer: 1 when interleaved, bufferSize otherwise.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // NOTE: this inner loop index intentionally shadows the stream
          // loop index above; the outer `i` is not used inside.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  return SUCCESS;
}
1851
getErrorCode(OSStatus code)1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1853 {
1854 switch( code ) {
1855
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1858
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1861
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1864
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1867
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1870
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1873
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1876
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1879
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1882
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1885
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1888
1889 default:
1890 return "CoreAudio unknown error";
1891 }
1892 }
1893
1894 //******************** End of __MACOSX_CORE__ *********************//
1895 #endif
1896
1897 #if defined(__UNIX_JACK__)
1898
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1903 //
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1907 //
1908 // .jackd -d alsa -d hw:0
1909 //
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1913 // particular,
1914 //
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1916 //
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1921 //
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1927
1928 #include <jack/jack.h>
1929 #include <unistd.h>
1930 #include <cstdio>
1931
1932 // A structure to hold various information related to the Jack API
1933 // implementation.
struct JackHandle {
  jack_client_t *client;        // connection to the JACK server for this stream
  jack_port_t **ports[2];       // registered ports; [0] = playback, [1] = capture
  std::string deviceName[2];    // names of the JACK clients we connect to
  bool xrun[2];                 // over/underrun flags set by the xrun callback
  pthread_cond_t condition;     // signaled when an externally requested drain finishes
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
1946
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler: installed when RtAudio debugging is disabled
// so that the JACK library's internal error messages are discarded.
// (Removed the stray semicolon after the function body — it formed an
// empty declaration and triggers -Wextra-semi/-pedantic warnings.)
static void jackSilentError( const char * ) {}
1949 #endif
1950
// Construct the JACK API instance. By default, streams opened through
// this object will auto-connect their ports (see probeDeviceOpen /
// startStream). When debugging is disabled, JACK's process-wide error
// printing is silenced via jack_set_error_function.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
1959
// Destructor: make sure any stream still open is closed (releasing the
// JACK client, registered ports, and internal buffers) before teardown.
RtApiJack :: ~RtApiJack()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
1964
getDeviceCount(void)1965 unsigned int RtApiJack :: getDeviceCount( void )
1966 {
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1972
1973 const char **ports;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, NULL, 0 );
1977 if ( ports ) {
1978 // Parse the port names up to the first colon (:).
1979 size_t iColon = 0;
1980 do {
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1986 nDevices++;
1987 previousPort = port;
1988 }
1989 }
1990 } while ( ports[++nChannels] );
1991 free( ports );
1992 }
1993
1994 jack_client_close( client );
1995 return nDevices;
1996 }
1997
// Probe one JACK "device" (a JACK client with registered ports) and fill
// in an RtAudio::DeviceInfo for it: name, channel counts, and the (fixed)
// server sample rate. info.probed remains false on any failure. Issues a
// WARNING if the server is unreachable and INVALID_USE for a bad device id.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    // Each distinct prefix is one device; the device-th prefix is ours.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // The server rate is fixed, so it is the only supported rate reported.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2090
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2092 {
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2094
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2097
2098 return 0;
2099 }
2100
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2105 static void *jackCloseStream( void *ptr )
2106 {
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2109
2110 object->closeStream();
2111
2112 pthread_exit( NULL );
2113 }
jackShutdown(void * infoPointer)2114 static void jackShutdown( void *infoPointer )
2115 {
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2118
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2125
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2129 }
2130
jackXrun(void * infoPointer)2131 static int jackXrun( void *infoPointer )
2132 {
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2134
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
2137
2138 return 0;
2139 }
2140
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2145 {
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2147
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2155 else
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2160 return FAILURE;
2161 }
2162 }
2163 else {
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
2166 }
2167
2168 const char **ports;
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, NULL, 0 );
2172 if ( ports ) {
2173 // Parse the port names up to the first colon (:).
2174 size_t iColon = 0;
2175 do {
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2182 nDevices++;
2183 previousPort = port;
2184 }
2185 }
2186 } while ( ports[++nPorts] );
2187 free( ports );
2188 }
2189
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2192 return FAILURE;
2193 }
2194
2195 // Count the available ports containing the client name as device
2196 // channels. Jack "input ports" equal RtAudio output channels.
2197 unsigned int nChannels = 0;
2198 unsigned long flag = JackPortIsInput;
2199 if ( mode == INPUT ) flag = JackPortIsOutput;
2200 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2201 if ( ports ) {
2202 while ( ports[ nChannels ] ) nChannels++;
2203 free( ports );
2204 }
2205
2206 // Compare the jack ports for specified client to the requested number of channels.
2207 if ( nChannels < (channels + firstChannel) ) {
2208 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2209 errorText_ = errorStream_.str();
2210 return FAILURE;
2211 }
2212
2213 // Check the jack server sample rate.
2214 unsigned int jackRate = jack_get_sample_rate( client );
2215 if ( sampleRate != jackRate ) {
2216 jack_client_close( client );
2217 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2218 errorText_ = errorStream_.str();
2219 return FAILURE;
2220 }
2221 stream_.sampleRate = jackRate;
2222
2223 // Get the latency of the JACK port.
2224 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2225 if ( ports[ firstChannel ] ) {
2226 // Added by Ge Wang
2227 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2228 // the range (usually the min and max are equal)
2229 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2230 // get the latency range
2231 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2232 // be optimistic, use the min!
2233 stream_.latency[mode] = latrange.min;
2234 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2235 }
2236 free( ports );
2237
2238 // The jack server always uses 32-bit floating-point data.
2239 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2240 stream_.userFormat = format;
2241
2242 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2243 else stream_.userInterleaved = true;
2244
2245 // Jack always uses non-interleaved buffers.
2246 stream_.deviceInterleaved[mode] = false;
2247
2248 // Jack always provides host byte-ordered data.
2249 stream_.doByteSwap[mode] = false;
2250
2251 // Get the buffer size. The buffer size and number of buffers
2252 // (periods) is set when the jack server is started.
2253 stream_.bufferSize = (int) jack_get_buffer_size( client );
2254 *bufferSize = stream_.bufferSize;
2255
2256 stream_.nDeviceChannels[mode] = channels;
2257 stream_.nUserChannels[mode] = channels;
2258
2259 // Set flags for buffer conversion.
2260 stream_.doConvertBuffer[mode] = false;
2261 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2262 stream_.doConvertBuffer[mode] = true;
2263 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2264 stream_.nUserChannels[mode] > 1 )
2265 stream_.doConvertBuffer[mode] = true;
2266
2267 // Allocate our JackHandle structure for the stream.
2268 if ( handle == 0 ) {
2269 try {
2270 handle = new JackHandle;
2271 }
2272 catch ( std::bad_alloc& ) {
2273 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2274 goto error;
2275 }
2276
2277 if ( pthread_cond_init(&handle->condition, NULL) ) {
2278 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2279 goto error;
2280 }
2281 stream_.apiHandle = (void *) handle;
2282 handle->client = client;
2283 }
2284 handle->deviceName[mode] = deviceName;
2285
2286 // Allocate necessary internal buffers.
2287 unsigned long bufferBytes;
2288 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2289 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2290 if ( stream_.userBuffer[mode] == NULL ) {
2291 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2292 goto error;
2293 }
2294
2295 if ( stream_.doConvertBuffer[mode] ) {
2296
2297 bool makeBuffer = true;
2298 if ( mode == OUTPUT )
2299 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2300 else { // mode == INPUT
2301 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2302 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2303 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2304 if ( bufferBytes < bytesOut ) makeBuffer = false;
2305 }
2306 }
2307
2308 if ( makeBuffer ) {
2309 bufferBytes *= *bufferSize;
2310 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2311 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2312 if ( stream_.deviceBuffer == NULL ) {
2313 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2314 goto error;
2315 }
2316 }
2317 }
2318
2319 // Allocate memory for the Jack ports (channels) identifiers.
2320 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2321 if ( handle->ports[mode] == NULL ) {
2322 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2323 goto error;
2324 }
2325
2326 stream_.device[mode] = device;
2327 stream_.channelOffset[mode] = firstChannel;
2328 stream_.state = STREAM_STOPPED;
2329 stream_.callbackInfo.object = (void *) this;
2330
2331 if ( stream_.mode == OUTPUT && mode == INPUT )
2332 // We had already set up the stream for output.
2333 stream_.mode = DUPLEX;
2334 else {
2335 stream_.mode = mode;
2336 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2337 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2338 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2339 }
2340
2341 // Register our ports.
2342 char label[64];
2343 if ( mode == OUTPUT ) {
2344 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2345 snprintf( label, 64, "outport %d", i );
2346 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2347 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2348 }
2349 }
2350 else {
2351 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2352 snprintf( label, 64, "inport %d", i );
2353 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2354 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2355 }
2356 }
2357
2358 // Setup the buffer conversion information structure. We don't use
2359 // buffers to do channel offsets, so we override that parameter
2360 // here.
2361 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2362
2363 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2364
2365 return SUCCESS;
2366
2367 error:
2368 if ( handle ) {
2369 pthread_cond_destroy( &handle->condition );
2370 jack_client_close( handle->client );
2371
2372 if ( handle->ports[0] ) free( handle->ports[0] );
2373 if ( handle->ports[1] ) free( handle->ports[1] );
2374
2375 delete handle;
2376 stream_.apiHandle = 0;
2377 }
2378
2379 for ( int i=0; i<2; i++ ) {
2380 if ( stream_.userBuffer[i] ) {
2381 free( stream_.userBuffer[i] );
2382 stream_.userBuffer[i] = 0;
2383 }
2384 }
2385
2386 if ( stream_.deviceBuffer ) {
2387 free( stream_.deviceBuffer );
2388 stream_.deviceBuffer = 0;
2389 }
2390
2391 return FAILURE;
2392 }
2393
closeStream(void)2394 void RtApiJack :: closeStream( void )
2395 {
2396 if ( stream_.state == STREAM_CLOSED ) {
2397 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2398 error( RtAudioError::WARNING );
2399 return;
2400 }
2401
2402 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2403 if ( handle ) {
2404
2405 if ( stream_.state == STREAM_RUNNING )
2406 jack_deactivate( handle->client );
2407
2408 jack_client_close( handle->client );
2409 }
2410
2411 if ( handle ) {
2412 if ( handle->ports[0] ) free( handle->ports[0] );
2413 if ( handle->ports[1] ) free( handle->ports[1] );
2414 pthread_cond_destroy( &handle->condition );
2415 delete handle;
2416 stream_.apiHandle = 0;
2417 }
2418
2419 for ( int i=0; i<2; i++ ) {
2420 if ( stream_.userBuffer[i] ) {
2421 free( stream_.userBuffer[i] );
2422 stream_.userBuffer[i] = 0;
2423 }
2424 }
2425
2426 if ( stream_.deviceBuffer ) {
2427 free( stream_.deviceBuffer );
2428 stream_.deviceBuffer = 0;
2429 }
2430
2431 stream_.mode = UNINITIALIZED;
2432 stream_.state = STREAM_CLOSED;
2433 }
2434
// Start a stopped stream: activate the JACK client and, unless the user
// requested RTAUDIO_JACK_DONT_CONNECT, auto-connect this stream's
// registered ports to the target device's ports (honoring the channel
// offsets chosen at open time). On any failure, reports SYSTEM_ERROR
// via the shared `unlock` exit path.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    // Pre-set result to failure; it is overwritten by each jack_connect.
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections. See note above.
    // For capture, the device's output ports feed our registered input ports.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset drain bookkeeping for the new run.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2508
// Stop the JACK stream.  For output/duplex streams, first ask the
// process callback to drain (zero-fill) the output by setting the drain
// counter, and block on the condition variable until the callback
// signals completion; then deactivate the JACK client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain is in progress yet; a non-zero
    // value (set by abortStream() or the callback) skips the wait.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): pthread_cond_wait() requires stream_.mutex to be
      // held by the caller; it is not obviously locked here -- confirm
      // against the locking convention used elsewhere in this file.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2530
abortStream(void)2531 void RtApiJack :: abortStream( void )
2532 {
2533 verifyStream();
2534 if ( stream_.state == STREAM_STOPPED ) {
2535 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2536 error( RtAudioError::WARNING );
2537 return;
2538 }
2539
2540 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2541 handle->drainCounter = 2;
2542
2543 stopStream();
2544 }
2545
2546 // This function will be called by a spawned thread when the user
2547 // callback function signals that the stream should be stopped or
2548 // aborted. It is necessary to handle it this way because the
2549 // callbackEvent() function must return before the jack_deactivate()
2550 // function will return.
jackStopStream(void * ptr)2551 static void *jackStopStream( void *ptr )
2552 {
2553 CallbackInfo *info = (CallbackInfo *) ptr;
2554 RtApiJack *object = (RtApiJack *) info->object;
2555
2556 object->stopStream();
2557 pthread_exit( NULL );
2558 }
2559
callbackEvent(unsigned long nframes)2560 bool RtApiJack :: callbackEvent( unsigned long nframes )
2561 {
2562 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2563 if ( stream_.state == STREAM_CLOSED ) {
2564 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2565 error( RtAudioError::WARNING );
2566 return FAILURE;
2567 }
2568 if ( stream_.bufferSize != nframes ) {
2569 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2570 error( RtAudioError::WARNING );
2571 return FAILURE;
2572 }
2573
2574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2575 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2576
2577 // Check if we were draining the stream and signal is finished.
2578 if ( handle->drainCounter > 3 ) {
2579 ThreadHandle threadId;
2580
2581 stream_.state = STREAM_STOPPING;
2582 if ( handle->internalDrain == true )
2583 pthread_create( &threadId, NULL, jackStopStream, info );
2584 else
2585 pthread_cond_signal( &handle->condition );
2586 return SUCCESS;
2587 }
2588
2589 // Invoke user callback first, to get fresh output data.
2590 if ( handle->drainCounter == 0 ) {
2591 RtAudioCallback callback = (RtAudioCallback) info->callback;
2592 double streamTime = getStreamTime();
2593 RtAudioStreamStatus status = 0;
2594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2596 handle->xrun[0] = false;
2597 }
2598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2599 status |= RTAUDIO_INPUT_OVERFLOW;
2600 handle->xrun[1] = false;
2601 }
2602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2603 stream_.bufferSize, streamTime, status, info->userData );
2604 if ( cbReturnValue == 2 ) {
2605 stream_.state = STREAM_STOPPING;
2606 handle->drainCounter = 2;
2607 ThreadHandle id;
2608 pthread_create( &id, NULL, jackStopStream, info );
2609 return SUCCESS;
2610 }
2611 else if ( cbReturnValue == 1 ) {
2612 handle->drainCounter = 1;
2613 handle->internalDrain = true;
2614 }
2615 }
2616
2617 jack_default_audio_sample_t *jackbuffer;
2618 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2620
2621 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2622
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2625 memset( jackbuffer, 0, bufferBytes );
2626 }
2627
2628 }
2629 else if ( stream_.doConvertBuffer[0] ) {
2630
2631 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2632
2633 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2636 }
2637 }
2638 else { // no buffer conversion
2639 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2640 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2641 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2642 }
2643 }
2644 }
2645
2646 // Don't bother draining input
2647 if ( handle->drainCounter ) {
2648 handle->drainCounter++;
2649 goto unlock;
2650 }
2651
2652 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2653
2654 if ( stream_.doConvertBuffer[1] ) {
2655 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2657 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2658 }
2659 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2660 }
2661 else { // no buffer conversion
2662 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2663 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2664 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2665 }
2666 }
2667 }
2668
2669 unlock:
2670 RtApi::tickStreamTime();
2671 return SUCCESS;
2672 }
2673 //******************** End of __UNIX_JACK__ *********************//
2674 #endif
2675
2676 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2677
2678 // The ASIO API is designed around a callback scheme, so this
2679 // implementation is similar to that used for OS-X CoreAudio and Linux
2680 // Jack. The primary constraint with ASIO is that it only allows
2681 // access to a single driver at a time. Thus, it is not possible to
2682 // have more than one simultaneous RtAudio stream.
2683 //
2684 // This implementation also requires a number of external ASIO files
2685 // and a few global variables. The ASIO callback scheme does not
2686 // allow for the passing of user data, so we must create a global
2687 // pointer to our callbackInfo structure.
2688 //
2689 // On unix systems, we make use of a pthread condition variable.
2690 // Since there is no equivalent in Windows, I hacked something based
2691 // on information found in
2692 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2693
2694 #include "asiosys.h"
2695 #include "asio.h"
2696 #include "iasiothiscallresolver.h"
2697 #include "asiodrivers.h"
2698 #include <cmath>
2699
// ASIO allows only one driver at a time and its callbacks carry no
// user-data pointer, so driver state and the active stream's callback
// info live in file-scope globals.
static AsioDrivers drivers;            // ASIO driver enumeration / currently loaded driver
static ASIOCallbacks asioCallbacks;    // callback table passed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;      // filled in by ASIOInit()
static CallbackInfo *asioCallbackInfo; // callback info of the currently open stream
static bool asioXRun;                  // xrun flag (latched outside this section; cleared on start)
2705
// Per-stream bookkeeping for the ASIO backend, stored in stream_.apiHandle.
struct AsioHandle {
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // per-channel double-buffer info from ASIOCreateBuffers()
  HANDLE condition;             // manual-reset event signaled when output drain completes

  // Members are zeroed here; 'condition' is created in probeDeviceOpen().
  AsioHandle()
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
2715
2716 // Function declarations (definitions at end of section)
2717 static const char* getAsioErrorString( ASIOError result );
2718 static void sampleRateChanged( ASIOSampleRate sRate );
2719 static long asioMessages( long selector, long value, void* message, double* opt );
2720
RtApiAsio()2721 RtApiAsio :: RtApiAsio()
2722 {
2723 // ASIO cannot run on a multi-threaded appartment. You can call
2724 // CoInitialize beforehand, but it must be for appartment threading
2725 // (in which case, CoInitilialize will return S_FALSE here).
2726 coInitialized_ = false;
2727 HRESULT hr = CoInitialize( NULL );
2728 if ( FAILED(hr) ) {
2729 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2730 error( RtAudioError::WARNING );
2731 }
2732 coInitialized_ = true;
2733
2734 drivers.removeCurrentDriver();
2735 driverInfo.asioVersion = 2;
2736
2737 // See note in DirectSound implementation about GetDesktopWindow().
2738 driverInfo.sysRef = GetForegroundWindow();
2739 }
2740
// Destructor: close any open stream first, then balance the
// CoInitialize() performed in the constructor.
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
2746
getDeviceCount(void)2747 unsigned int RtApiAsio :: getDeviceCount( void )
2748 {
2749 return (unsigned int) drivers.asioGetNumDev();
2750 }
2751
getDeviceInfo(unsigned int device)2752 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2753 {
2754 RtAudio::DeviceInfo info;
2755 info.probed = false;
2756
2757 // Get device ID
2758 unsigned int nDevices = getDeviceCount();
2759 if ( nDevices == 0 ) {
2760 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2761 error( RtAudioError::INVALID_USE );
2762 return info;
2763 }
2764
2765 if ( device >= nDevices ) {
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2767 error( RtAudioError::INVALID_USE );
2768 return info;
2769 }
2770
2771 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2772 if ( stream_.state != STREAM_CLOSED ) {
2773 if ( device >= devices_.size() ) {
2774 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2775 error( RtAudioError::WARNING );
2776 return info;
2777 }
2778 return devices_[ device ];
2779 }
2780
2781 char driverName[32];
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2783 if ( result != ASE_OK ) {
2784 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2785 errorText_ = errorStream_.str();
2786 error( RtAudioError::WARNING );
2787 return info;
2788 }
2789
2790 info.name = driverName;
2791
2792 if ( !drivers.loadDriver( driverName ) ) {
2793 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2794 errorText_ = errorStream_.str();
2795 error( RtAudioError::WARNING );
2796 return info;
2797 }
2798
2799 result = ASIOInit( &driverInfo );
2800 if ( result != ASE_OK ) {
2801 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2802 errorText_ = errorStream_.str();
2803 error( RtAudioError::WARNING );
2804 return info;
2805 }
2806
2807 // Determine the device channel information.
2808 long inputChannels, outputChannels;
2809 result = ASIOGetChannels( &inputChannels, &outputChannels );
2810 if ( result != ASE_OK ) {
2811 drivers.removeCurrentDriver();
2812 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2813 errorText_ = errorStream_.str();
2814 error( RtAudioError::WARNING );
2815 return info;
2816 }
2817
2818 info.outputChannels = outputChannels;
2819 info.inputChannels = inputChannels;
2820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2822
2823 // Determine the supported sample rates.
2824 info.sampleRates.clear();
2825 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2826 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2827 if ( result == ASE_OK ) {
2828 info.sampleRates.push_back( SAMPLE_RATES[i] );
2829
2830 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2831 info.preferredSampleRate = SAMPLE_RATES[i];
2832 }
2833 }
2834
2835 // Determine supported data types ... just check first channel and assume rest are the same.
2836 ASIOChannelInfo channelInfo;
2837 channelInfo.channel = 0;
2838 channelInfo.isInput = true;
2839 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2840 result = ASIOGetChannelInfo( &channelInfo );
2841 if ( result != ASE_OK ) {
2842 drivers.removeCurrentDriver();
2843 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2844 errorText_ = errorStream_.str();
2845 error( RtAudioError::WARNING );
2846 return info;
2847 }
2848
2849 info.nativeFormats = 0;
2850 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2851 info.nativeFormats |= RTAUDIO_SINT16;
2852 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2853 info.nativeFormats |= RTAUDIO_SINT32;
2854 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2855 info.nativeFormats |= RTAUDIO_FLOAT32;
2856 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT64;
2858 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2859 info.nativeFormats |= RTAUDIO_SINT24;
2860
2861 if ( info.outputChannels > 0 )
2862 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2863 if ( info.inputChannels > 0 )
2864 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2865
2866 info.probed = true;
2867 drivers.removeCurrentDriver();
2868 return info;
2869 }
2870
bufferSwitch(long index,ASIOBool)2871 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2872 {
2873 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2874 object->callbackEvent( index );
2875 }
2876
saveDeviceInfo(void)2877 void RtApiAsio :: saveDeviceInfo( void )
2878 {
2879 devices_.clear();
2880
2881 unsigned int nDevices = getDeviceCount();
2882 devices_.resize( nDevices );
2883 for ( unsigned int i=0; i<nDevices; i++ )
2884 devices_[i] = getDeviceInfo( i );
2885 }
2886
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2887 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2888 unsigned int firstChannel, unsigned int sampleRate,
2889 RtAudioFormat format, unsigned int *bufferSize,
2890 RtAudio::StreamOptions *options )
2891 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2892
2893 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2894
2895 // For ASIO, a duplex stream MUST use the same driver.
2896 if ( isDuplexInput && stream_.device[0] != device ) {
2897 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2898 return FAILURE;
2899 }
2900
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2906 return FAILURE;
2907 }
2908
2909 // Only load the driver once for duplex stream.
2910 if ( !isDuplexInput ) {
2911 // The getDeviceInfo() function will not work when a stream is open
2912 // because ASIO does not allow multiple devices to run at the same
2913 // time. Thus, we'll probe the system before opening a stream and
2914 // save the results for use by getDeviceInfo().
2915 this->saveDeviceInfo();
2916
2917 if ( !drivers.loadDriver( driverName ) ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2919 errorText_ = errorStream_.str();
2920 return FAILURE;
2921 }
2922
2923 result = ASIOInit( &driverInfo );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2927 return FAILURE;
2928 }
2929 }
2930
2931 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2932 bool buffersAllocated = false;
2933 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2934 unsigned int nChannels;
2935
2936
2937 // Check the device channel count.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2942 errorText_ = errorStream_.str();
2943 goto error;
2944 }
2945
2946 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2947 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2949 errorText_ = errorStream_.str();
2950 goto error;
2951 }
2952 stream_.nDeviceChannels[mode] = channels;
2953 stream_.nUserChannels[mode] = channels;
2954 stream_.channelOffset[mode] = firstChannel;
2955
2956 // Verify the sample rate is supported.
2957 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2960 errorText_ = errorStream_.str();
2961 goto error;
2962 }
2963
2964 // Get the current sample rate
2965 ASIOSampleRate currentRate;
2966 result = ASIOGetSampleRate( ¤tRate );
2967 if ( result != ASE_OK ) {
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2969 errorText_ = errorStream_.str();
2970 goto error;
2971 }
2972
2973 // Set the sample rate only if necessary
2974 if ( currentRate != sampleRate ) {
2975 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2978 errorText_ = errorStream_.str();
2979 goto error;
2980 }
2981 }
2982
2983 // Determine the driver data type.
2984 ASIOChannelInfo channelInfo;
2985 channelInfo.channel = 0;
2986 if ( mode == OUTPUT ) channelInfo.isInput = false;
2987 else channelInfo.isInput = true;
2988 result = ASIOGetChannelInfo( &channelInfo );
2989 if ( result != ASE_OK ) {
2990 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2991 errorText_ = errorStream_.str();
2992 goto error;
2993 }
2994
2995 // Assuming WINDOWS host is always little-endian.
2996 stream_.doByteSwap[mode] = false;
2997 stream_.userFormat = format;
2998 stream_.deviceFormat[mode] = 0;
2999 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3001 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3002 }
3003 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3005 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3006 }
3007 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3009 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3010 }
3011 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3012 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3013 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3014 }
3015 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3017 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3018 }
3019
3020 if ( stream_.deviceFormat[mode] == 0 ) {
3021 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3022 errorText_ = errorStream_.str();
3023 goto error;
3024 }
3025
3026 // Set the buffer size. For a duplex stream, this will end up
3027 // setting the buffer size based on the input constraints, which
3028 // should be ok.
3029 long minSize, maxSize, preferSize, granularity;
3030 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3033 errorText_ = errorStream_.str();
3034 goto error;
3035 }
3036
3037 if ( isDuplexInput ) {
3038 // When this is the duplex input (output was opened before), then we have to use the same
3039 // buffersize as the output, because it might use the preferred buffer size, which most
3040 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3041 // So instead of throwing an error, make them equal. The caller uses the reference
3042 // to the "bufferSize" param as usual to set up processing buffers.
3043
3044 *bufferSize = stream_.bufferSize;
3045
3046 } else {
3047 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3048 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3049 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3050 else if ( granularity == -1 ) {
3051 // Make sure bufferSize is a power of two.
3052 int log2_of_min_size = 0;
3053 int log2_of_max_size = 0;
3054
3055 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3056 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3057 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3058 }
3059
3060 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3061 int min_delta_num = log2_of_min_size;
3062
3063 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3064 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3065 if (current_delta < min_delta) {
3066 min_delta = current_delta;
3067 min_delta_num = i;
3068 }
3069 }
3070
3071 *bufferSize = ( (unsigned int)1 << min_delta_num );
3072 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3073 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3074 }
3075 else if ( granularity != 0 ) {
3076 // Set to an even multiple of granularity, rounding up.
3077 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3078 }
3079 }
3080
3081 /*
3082 // we don't use it anymore, see above!
3083 // Just left it here for the case...
3084 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3085 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3086 goto error;
3087 }
3088 */
3089
3090 stream_.bufferSize = *bufferSize;
3091 stream_.nBuffers = 2;
3092
3093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3094 else stream_.userInterleaved = true;
3095
3096 // ASIO always uses non-interleaved buffers.
3097 stream_.deviceInterleaved[mode] = false;
3098
3099 // Allocate, if necessary, our AsioHandle structure for the stream.
3100 if ( handle == 0 ) {
3101 try {
3102 handle = new AsioHandle;
3103 }
3104 catch ( std::bad_alloc& ) {
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3106 goto error;
3107 }
3108 handle->bufferInfos = 0;
3109
3110 // Create a manual-reset event.
3111 handle->condition = CreateEvent( NULL, // no security
3112 TRUE, // manual-reset
3113 FALSE, // non-signaled initially
3114 NULL ); // unnamed
3115 stream_.apiHandle = (void *) handle;
3116 }
3117
3118 // Create the ASIO internal buffers. Since RtAudio sets up input
3119 // and output separately, we'll have to dispose of previously
3120 // created output buffers for a duplex stream.
3121 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3122 ASIODisposeBuffers();
3123 if ( handle->bufferInfos ) free( handle->bufferInfos );
3124 }
3125
3126 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3127 unsigned int i;
3128 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3129 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3130 if ( handle->bufferInfos == NULL ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3132 errorText_ = errorStream_.str();
3133 goto error;
3134 }
3135
3136 ASIOBufferInfo *infos;
3137 infos = handle->bufferInfos;
3138 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3139 infos->isInput = ASIOFalse;
3140 infos->channelNum = i + stream_.channelOffset[0];
3141 infos->buffers[0] = infos->buffers[1] = 0;
3142 }
3143 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3144 infos->isInput = ASIOTrue;
3145 infos->channelNum = i + stream_.channelOffset[1];
3146 infos->buffers[0] = infos->buffers[1] = 0;
3147 }
3148
3149 // prepare for callbacks
3150 stream_.sampleRate = sampleRate;
3151 stream_.device[mode] = device;
3152 stream_.mode = isDuplexInput ? DUPLEX : mode;
3153
3154 // store this class instance before registering callbacks, that are going to use it
3155 asioCallbackInfo = &stream_.callbackInfo;
3156 stream_.callbackInfo.object = (void *) this;
3157
3158 // Set up the ASIO callback structure and create the ASIO data buffers.
3159 asioCallbacks.bufferSwitch = &bufferSwitch;
3160 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3161 asioCallbacks.asioMessage = &asioMessages;
3162 asioCallbacks.bufferSwitchTimeInfo = NULL;
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3164 if ( result != ASE_OK ) {
3165 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3166 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3167 // in that case, let's be naïve and try that instead
3168 *bufferSize = preferSize;
3169 stream_.bufferSize = *bufferSize;
3170 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3171 }
3172
3173 if ( result != ASE_OK ) {
3174 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3175 errorText_ = errorStream_.str();
3176 goto error;
3177 }
3178 buffersAllocated = true;
3179 stream_.state = STREAM_STOPPED;
3180
3181 // Set flags for buffer conversion.
3182 stream_.doConvertBuffer[mode] = false;
3183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3184 stream_.doConvertBuffer[mode] = true;
3185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3186 stream_.nUserChannels[mode] > 1 )
3187 stream_.doConvertBuffer[mode] = true;
3188
3189 // Allocate necessary internal buffers
3190 unsigned long bufferBytes;
3191 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3193 if ( stream_.userBuffer[mode] == NULL ) {
3194 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3195 goto error;
3196 }
3197
3198 if ( stream_.doConvertBuffer[mode] ) {
3199
3200 bool makeBuffer = true;
3201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3202 if ( isDuplexInput && stream_.deviceBuffer ) {
3203 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3204 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3205 }
3206
3207 if ( makeBuffer ) {
3208 bufferBytes *= *bufferSize;
3209 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3210 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3211 if ( stream_.deviceBuffer == NULL ) {
3212 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3213 goto error;
3214 }
3215 }
3216 }
3217
3218 // Determine device latencies
3219 long inputLatency, outputLatency;
3220 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3221 if ( result != ASE_OK ) {
3222 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3223 errorText_ = errorStream_.str();
3224 error( RtAudioError::WARNING); // warn but don't fail
3225 }
3226 else {
3227 stream_.latency[0] = outputLatency;
3228 stream_.latency[1] = inputLatency;
3229 }
3230
3231 // Setup the buffer conversion information structure. We don't use
3232 // buffers to do channel offsets, so we override that parameter
3233 // here.
3234 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3235
3236 return SUCCESS;
3237
3238 error:
3239 if ( !isDuplexInput ) {
3240 // the cleanup for error in the duplex input, is done by RtApi::openStream
3241 // So we clean up for single channel only
3242
3243 if ( buffersAllocated )
3244 ASIODisposeBuffers();
3245
3246 drivers.removeCurrentDriver();
3247
3248 if ( handle ) {
3249 CloseHandle( handle->condition );
3250 if ( handle->bufferInfos )
3251 free( handle->bufferInfos );
3252
3253 delete handle;
3254 stream_.apiHandle = 0;
3255 }
3256
3257
3258 if ( stream_.userBuffer[mode] ) {
3259 free( stream_.userBuffer[mode] );
3260 stream_.userBuffer[mode] = 0;
3261 }
3262
3263 if ( stream_.deviceBuffer ) {
3264 free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = 0;
3266 }
3267 }
3268
3269 return FAILURE;
3270 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3271
// Close the open ASIO stream: stop it if running, dispose the ASIO
// buffers, unload the driver, and free all per-stream resources.
void RtApiAsio :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  if ( stream_.state == STREAM_RUNNING ) {
    // Mark stopped before ASIOStop() so the callback bails out early.
    stream_.state = STREAM_STOPPED;
    ASIOStop();
  }
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free user buffers for both directions (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
3311
3312 bool stopThreadCalled = false;
3313
startStream()3314 void RtApiAsio :: startStream()
3315 {
3316 verifyStream();
3317 if ( stream_.state == STREAM_RUNNING ) {
3318 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3319 error( RtAudioError::WARNING );
3320 return;
3321 }
3322
3323 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3324 ASIOError result = ASIOStart();
3325 if ( result != ASE_OK ) {
3326 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3327 errorText_ = errorStream_.str();
3328 goto unlock;
3329 }
3330
3331 handle->drainCounter = 0;
3332 handle->internalDrain = false;
3333 ResetEvent( handle->condition );
3334 stream_.state = STREAM_RUNNING;
3335 asioXRun = false;
3336
3337 unlock:
3338 stopThreadCalled = false;
3339
3340 if ( result == ASE_OK ) return;
3341 error( RtAudioError::SYSTEM_ERROR );
3342 }
3343
stopStream()3344 void RtApiAsio :: stopStream()
3345 {
3346 verifyStream();
3347 if ( stream_.state == STREAM_STOPPED ) {
3348 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3349 error( RtAudioError::WARNING );
3350 return;
3351 }
3352
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3355 if ( handle->drainCounter == 0 ) {
3356 handle->drainCounter = 2;
3357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3358 }
3359 }
3360
3361 stream_.state = STREAM_STOPPED;
3362
3363 ASIOError result = ASIOStop();
3364 if ( result != ASE_OK ) {
3365 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3366 errorText_ = errorStream_.str();
3367 }
3368
3369 if ( result == ASE_OK ) return;
3370 error( RtAudioError::SYSTEM_ERROR );
3371 }
3372
abortStream()3373 void RtApiAsio :: abortStream()
3374 {
3375 verifyStream();
3376 if ( stream_.state == STREAM_STOPPED ) {
3377 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3378 error( RtAudioError::WARNING );
3379 return;
3380 }
3381
3382 // The following lines were commented-out because some behavior was
3383 // noted where the device buffers need to be zeroed to avoid
3384 // continuing sound, even when the device buffers are completely
3385 // disposed. So now, calling abort is the same as calling stop.
3386 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3387 // handle->drainCounter = 2;
3388 stopStream();
3389 }
3390
3391 // This function will be called by a spawned thread when the user
3392 // callback function signals that the stream should be stopped or
3393 // aborted. It is necessary to handle it this way because the
3394 // callbackEvent() function must return before the ASIOStop()
3395 // function will return.
asioStopStream(void * ptr)3396 static unsigned __stdcall asioStopStream( void *ptr )
3397 {
3398 CallbackInfo *info = (CallbackInfo *) ptr;
3399 RtApiAsio *object = (RtApiAsio *) info->object;
3400
3401 object->stopStream();
3402 _endthreadex( 0 );
3403 return 0;
3404 }
3405
// ASIO buffer-switch handler. For the given half-buffer index (bufferIndex),
// fills the device's output channel buffers from the user buffer and/or copies
// the device's input channel buffers into the user buffer, invoking the user
// callback for fresh data when not draining. Returns SUCCESS or FAILURE.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // Nothing to do once a stop is pending or complete.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // External stop request: stopStream() is blocked on this event.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // ASIOStop() cannot complete until this callback returns, so the stop
      // must happen on a separate thread (see asioStopStream() below).
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flagged by the driver via asioMessages().
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort: stop from a helper thread.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a drain: play out remaining data, then stop.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per channel per buffer for the output device format.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert (and optionally byte-swap) into the interleaved device
      // buffer, then scatter it into ASIO's per-channel buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No format conversion needed: copy (optionally byte-swapped) user
      // data directly into ASIO's per-channel buffers.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Bytes per channel per buffer for the input device format.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion: copy each input channel straight to the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3559
sampleRateChanged(ASIOSampleRate sRate)3560 static void sampleRateChanged( ASIOSampleRate sRate )
3561 {
3562 // The ASIO documentation says that this usually only happens during
3563 // external sync. Audio processing is not stopped by the driver,
3564 // actual sample rate might not have even changed, maybe only the
3565 // sample rate status of an AES/EBU or S/PDIF digital input at the
3566 // audio device.
3567
3568 RtApi *object = (RtApi *) asioCallbackInfo->object;
3569 try {
3570 object->stopStream();
3571 }
3572 catch ( RtAudioError &exception ) {
3573 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3574 return;
3575 }
3576
3577 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3578 }
3579
asioMessages(long selector,long value,void *,double *)3580 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3581 {
3582 long ret = 0;
3583
3584 switch( selector ) {
3585 case kAsioSelectorSupported:
3586 if ( value == kAsioResetRequest
3587 || value == kAsioEngineVersion
3588 || value == kAsioResyncRequest
3589 || value == kAsioLatenciesChanged
3590 // The following three were added for ASIO 2.0, you don't
3591 // necessarily have to support them.
3592 || value == kAsioSupportsTimeInfo
3593 || value == kAsioSupportsTimeCode
3594 || value == kAsioSupportsInputMonitor)
3595 ret = 1L;
3596 break;
3597 case kAsioResetRequest:
3598 // Defer the task and perform the reset of the driver during the
3599 // next "safe" situation. You cannot reset the driver right now,
3600 // as this code is called from the driver. Reset the driver is
3601 // done by completely destruct is. I.e. ASIOStop(),
3602 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3603 // driver again.
3604 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3605 ret = 1L;
3606 break;
3607 case kAsioResyncRequest:
3608 // This informs the application that the driver encountered some
3609 // non-fatal data loss. It is used for synchronization purposes
3610 // of different media. Added mainly to work around the Win16Mutex
3611 // problems in Windows 95/98 with the Windows Multimedia system,
3612 // which could lose data because the Mutex was held too long by
3613 // another thread. However a driver can issue it in other
3614 // situations, too.
3615 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3616 asioXRun = true;
3617 ret = 1L;
3618 break;
3619 case kAsioLatenciesChanged:
3620 // This will inform the host application that the drivers were
3621 // latencies changed. Beware, it this does not mean that the
3622 // buffer sizes have changed! You might need to update internal
3623 // delay data.
3624 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3625 ret = 1L;
3626 break;
3627 case kAsioEngineVersion:
3628 // Return the supported ASIO version of the host application. If
3629 // a host application does not implement this selector, ASIO 1.0
3630 // is assumed by the driver.
3631 ret = 2L;
3632 break;
3633 case kAsioSupportsTimeInfo:
3634 // Informs the driver whether the
3635 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3636 // For compatibility with ASIO 1.0 drivers the host application
3637 // should always support the "old" bufferSwitch method, too.
3638 ret = 0;
3639 break;
3640 case kAsioSupportsTimeCode:
3641 // Informs the driver whether application is interested in time
3642 // code info. If an application does not need to know about time
3643 // code, the driver has less work to do.
3644 ret = 0;
3645 break;
3646 }
3647 return ret;
3648 }
3649
getAsioErrorString(ASIOError result)3650 static const char* getAsioErrorString( ASIOError result )
3651 {
3652 struct Messages
3653 {
3654 ASIOError value;
3655 const char*message;
3656 };
3657
3658 static const Messages m[] =
3659 {
3660 { ASE_NotPresent, "Hardware input or output is not present or available." },
3661 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3662 { ASE_InvalidParameter, "Invalid input parameter." },
3663 { ASE_InvalidMode, "Invalid mode." },
3664 { ASE_SPNotAdvancing, "Sample position not advancing." },
3665 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3666 { ASE_NoMemory, "Not enough memory to complete the request." }
3667 };
3668
3669 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3670 if ( m[i].value == result ) return m[i].message;
3671
3672 return "Unknown error.";
3673 }
3674
3675 //******************** End of __WINDOWS_ASIO__ *********************//
3676 #endif
3677
3678
3679 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3680
3681 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3682 // - Introduces support for the Windows WASAPI API
3683 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3684 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3685 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3686
3687 #ifndef INITGUID
3688 #define INITGUID
3689 #endif
3690 #include <audioclient.h>
3691 #include <avrt.h>
3692 #include <mmdeviceapi.h>
3693 #include <functiondiscoverykeys_devpkey.h>
3694 #include <sstream>
3695
3696 //=============================================================================
3697
3698 #define SAFE_RELEASE( objectPtr )\
3699 if ( objectPtr )\
3700 {\
3701 objectPtr->Release();\
3702 objectPtr = NULL;\
3703 }
3704
3705 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3706
3707 //-----------------------------------------------------------------------------
3708
3709 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3710 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3711 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3712 // provide intermediate storage for read / write synchronization.
class WasapiBuffer
{
public:
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}

  ~WasapiBuffer() {
    free( buffer_ );
  }

  // sets the length of the internal ring buffer
  // bufferSize is a count of samples; formatBytes is the size in bytes of one
  // sample. Any previously held data is discarded and both indices reset.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    free( buffer_ );

    // NOTE(review): calloc failure is not checked here — buffer_ would be
    // NULL while bufferSize_ is non-zero, and a later push/pull would write
    // through NULL. Confirm callers request sizes that cannot fail.
    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }

  // attempt to push a buffer into the ring buffer at the current "in" index
  // bufferSize is in samples; the copy is split in (up to) two memcpy calls
  // to handle wrap-around at the end of the ring. Returns false (without
  // copying) when the data would overrun the unread region.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "out" index so the overlap test below works across the
    // ring's wrap point.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // "in" index can end on the "out" index but cannot begin at it
    if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromZeroSize: samples that wrap past the end and land at the ring's
    // start; fromInSize: samples written at the current "in" position.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // Indices are sample counts, so the ring is addressed through a pointer
    // of the sample's actual type for each supported format.
    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
    }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }

  // attempt to pull a buffer from the ring buffer from the current "out" index
  // Mirror image of pushBuffer(): bufferSize is in samples, wrap-around is
  // handled with a split copy, and false is returned when not enough data
  // has been pushed yet.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index for the overlap test across the wrap point.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // "out" index can begin at and end on the "in" index
    if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }

private:
  char* buffer_;             // ring storage (malloc'd; freed in destructor)
  unsigned int bufferSize_;  // capacity in samples (not bytes)
  unsigned int inIndex_;     // next write position, in samples
  unsigned int outIndex_;    // next read position, in samples
};
3865
3866 //-----------------------------------------------------------------------------
3867
3868 // A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  IAudioClient* captureAudioClient;     // COM audio client for the capture endpoint
  IAudioClient* renderAudioClient;      // COM audio client for the render endpoint
  IAudioCaptureClient* captureClient;   // capture service obtained from captureAudioClient
  IAudioRenderClient* renderClient;     // render service obtained from renderAudioClient
  HANDLE captureEvent;                  // Win32 event signaled when capture data is ready
  HANDLE renderEvent;                   // Win32 event signaled when render buffer space is free

  // All members start NULL; whoever opens the stream fills in the ones it
  // needs, and RtApiWasapi::closeStream() releases/closes the non-NULL ones.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
3886
3887 //=============================================================================
3888
RtApiWasapi()3889 RtApiWasapi::RtApiWasapi()
3890 : coInitialized_( false ), deviceEnumerator_( NULL )
3891 {
3892 // WASAPI can run either apartment or multi-threaded
3893 HRESULT hr = CoInitialize( NULL );
3894 if ( !FAILED( hr ) )
3895 coInitialized_ = true;
3896
3897 // Instantiate device enumerator
3898 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3899 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3900 ( void** ) &deviceEnumerator_ );
3901
3902 if ( FAILED( hr ) ) {
3903 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3904 error( RtAudioError::DRIVER_ERROR );
3905 }
3906 }
3907
3908 //-----------------------------------------------------------------------------
3909
RtApiWasapi::~RtApiWasapi()
{
  // Close any stream that is still open before tearing down COM state.
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  // (see constructor), balance it with exactly one CoUninitialize().
  if ( coInitialized_ )
    CoUninitialize();
}
3921
3922 //=============================================================================
3923
getDeviceCount(void)3924 unsigned int RtApiWasapi::getDeviceCount( void )
3925 {
3926 unsigned int captureDeviceCount = 0;
3927 unsigned int renderDeviceCount = 0;
3928
3929 IMMDeviceCollection* captureDevices = NULL;
3930 IMMDeviceCollection* renderDevices = NULL;
3931
3932 // Count capture devices
3933 errorText_.clear();
3934 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3935 if ( FAILED( hr ) ) {
3936 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3937 goto Exit;
3938 }
3939
3940 hr = captureDevices->GetCount( &captureDeviceCount );
3941 if ( FAILED( hr ) ) {
3942 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3943 goto Exit;
3944 }
3945
3946 // Count render devices
3947 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3948 if ( FAILED( hr ) ) {
3949 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3950 goto Exit;
3951 }
3952
3953 hr = renderDevices->GetCount( &renderDeviceCount );
3954 if ( FAILED( hr ) ) {
3955 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3956 goto Exit;
3957 }
3958
3959 Exit:
3960 // release all references
3961 SAFE_RELEASE( captureDevices );
3962 SAFE_RELEASE( renderDevices );
3963
3964 if ( errorText_.empty() )
3965 return captureDeviceCount + renderDeviceCount;
3966
3967 error( RtAudioError::DRIVER_ERROR );
3968 return 0;
3969 }
3970
3971 //-----------------------------------------------------------------------------
3972
getDeviceInfo(unsigned int device)3973 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3974 {
3975 RtAudio::DeviceInfo info;
3976 unsigned int captureDeviceCount = 0;
3977 unsigned int renderDeviceCount = 0;
3978 std::string defaultDeviceName;
3979 bool isCaptureDevice = false;
3980
3981 PROPVARIANT deviceNameProp;
3982 PROPVARIANT defaultDeviceNameProp;
3983
3984 IMMDeviceCollection* captureDevices = NULL;
3985 IMMDeviceCollection* renderDevices = NULL;
3986 IMMDevice* devicePtr = NULL;
3987 IMMDevice* defaultDevicePtr = NULL;
3988 IAudioClient* audioClient = NULL;
3989 IPropertyStore* devicePropStore = NULL;
3990 IPropertyStore* defaultDevicePropStore = NULL;
3991
3992 WAVEFORMATEX* deviceFormat = NULL;
3993 WAVEFORMATEX* closestMatchFormat = NULL;
3994
3995 // probed
3996 info.probed = false;
3997
3998 // Count capture devices
3999 errorText_.clear();
4000 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4001 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4002 if ( FAILED( hr ) ) {
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4004 goto Exit;
4005 }
4006
4007 hr = captureDevices->GetCount( &captureDeviceCount );
4008 if ( FAILED( hr ) ) {
4009 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4010 goto Exit;
4011 }
4012
4013 // Count render devices
4014 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4015 if ( FAILED( hr ) ) {
4016 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4017 goto Exit;
4018 }
4019
4020 hr = renderDevices->GetCount( &renderDeviceCount );
4021 if ( FAILED( hr ) ) {
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4023 goto Exit;
4024 }
4025
4026 // validate device index
4027 if ( device >= captureDeviceCount + renderDeviceCount ) {
4028 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4029 errorType = RtAudioError::INVALID_USE;
4030 goto Exit;
4031 }
4032
4033 // determine whether index falls within capture or render devices
4034 if ( device >= renderDeviceCount ) {
4035 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4036 if ( FAILED( hr ) ) {
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4038 goto Exit;
4039 }
4040 isCaptureDevice = true;
4041 }
4042 else {
4043 hr = renderDevices->Item( device, &devicePtr );
4044 if ( FAILED( hr ) ) {
4045 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4046 goto Exit;
4047 }
4048 isCaptureDevice = false;
4049 }
4050
4051 // get default device name
4052 if ( isCaptureDevice ) {
4053 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4054 if ( FAILED( hr ) ) {
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4056 goto Exit;
4057 }
4058 }
4059 else {
4060 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4061 if ( FAILED( hr ) ) {
4062 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4063 goto Exit;
4064 }
4065 }
4066
4067 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4068 if ( FAILED( hr ) ) {
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4070 goto Exit;
4071 }
4072 PropVariantInit( &defaultDeviceNameProp );
4073
4074 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4075 if ( FAILED( hr ) ) {
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4077 goto Exit;
4078 }
4079
4080 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4081
4082 // name
4083 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4084 if ( FAILED( hr ) ) {
4085 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4086 goto Exit;
4087 }
4088
4089 PropVariantInit( &deviceNameProp );
4090
4091 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4094 goto Exit;
4095 }
4096
4097 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4098
4099 // is default
4100 if ( isCaptureDevice ) {
4101 info.isDefaultInput = info.name == defaultDeviceName;
4102 info.isDefaultOutput = false;
4103 }
4104 else {
4105 info.isDefaultInput = false;
4106 info.isDefaultOutput = info.name == defaultDeviceName;
4107 }
4108
4109 // channel count
4110 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4111 if ( FAILED( hr ) ) {
4112 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4113 goto Exit;
4114 }
4115
4116 hr = audioClient->GetMixFormat( &deviceFormat );
4117 if ( FAILED( hr ) ) {
4118 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4119 goto Exit;
4120 }
4121
4122 if ( isCaptureDevice ) {
4123 info.inputChannels = deviceFormat->nChannels;
4124 info.outputChannels = 0;
4125 info.duplexChannels = 0;
4126 }
4127 else {
4128 info.inputChannels = 0;
4129 info.outputChannels = deviceFormat->nChannels;
4130 info.duplexChannels = 0;
4131 }
4132
4133 // sample rates (WASAPI only supports the one native sample rate)
4134 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4135
4136 info.sampleRates.clear();
4137 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
4138
4139 // native format
4140 info.nativeFormats = 0;
4141
4142 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4143 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4144 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4145 {
4146 if ( deviceFormat->wBitsPerSample == 32 ) {
4147 info.nativeFormats |= RTAUDIO_FLOAT32;
4148 }
4149 else if ( deviceFormat->wBitsPerSample == 64 ) {
4150 info.nativeFormats |= RTAUDIO_FLOAT64;
4151 }
4152 }
4153 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4154 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4155 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4156 {
4157 if ( deviceFormat->wBitsPerSample == 8 ) {
4158 info.nativeFormats |= RTAUDIO_SINT8;
4159 }
4160 else if ( deviceFormat->wBitsPerSample == 16 ) {
4161 info.nativeFormats |= RTAUDIO_SINT16;
4162 }
4163 else if ( deviceFormat->wBitsPerSample == 24 ) {
4164 info.nativeFormats |= RTAUDIO_SINT24;
4165 }
4166 else if ( deviceFormat->wBitsPerSample == 32 ) {
4167 info.nativeFormats |= RTAUDIO_SINT32;
4168 }
4169 }
4170
4171 // probed
4172 info.probed = true;
4173
4174 Exit:
4175 // release all references
4176 PropVariantClear( &deviceNameProp );
4177 PropVariantClear( &defaultDeviceNameProp );
4178
4179 SAFE_RELEASE( captureDevices );
4180 SAFE_RELEASE( renderDevices );
4181 SAFE_RELEASE( devicePtr );
4182 SAFE_RELEASE( defaultDevicePtr );
4183 SAFE_RELEASE( audioClient );
4184 SAFE_RELEASE( devicePropStore );
4185 SAFE_RELEASE( defaultDevicePropStore );
4186
4187 CoTaskMemFree( deviceFormat );
4188 CoTaskMemFree( closestMatchFormat );
4189
4190 if ( !errorText_.empty() )
4191 error( errorType );
4192 return info;
4193 }
4194
4195 //-----------------------------------------------------------------------------
4196
getDefaultOutputDevice(void)4197 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4198 {
4199 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4200 if ( getDeviceInfo( i ).isDefaultOutput ) {
4201 return i;
4202 }
4203 }
4204
4205 return 0;
4206 }
4207
4208 //-----------------------------------------------------------------------------
4209
getDefaultInputDevice(void)4210 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4211 {
4212 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4213 if ( getDeviceInfo( i ).isDefaultInput ) {
4214 return i;
4215 }
4216 }
4217
4218 return 0;
4219 }
4220
4221 //-----------------------------------------------------------------------------
4222
closeStream(void)4223 void RtApiWasapi::closeStream( void )
4224 {
4225 if ( stream_.state == STREAM_CLOSED ) {
4226 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4227 error( RtAudioError::WARNING );
4228 return;
4229 }
4230
4231 if ( stream_.state != STREAM_STOPPED )
4232 stopStream();
4233
4234 // clean up stream memory
4235 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4236 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4237
4238 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4239 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4240
4241 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4242 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4243
4244 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4245 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4246
4247 delete ( WasapiHandle* ) stream_.apiHandle;
4248 stream_.apiHandle = NULL;
4249
4250 for ( int i = 0; i < 2; i++ ) {
4251 if ( stream_.userBuffer[i] ) {
4252 free( stream_.userBuffer[i] );
4253 stream_.userBuffer[i] = 0;
4254 }
4255 }
4256
4257 if ( stream_.deviceBuffer ) {
4258 free( stream_.deviceBuffer );
4259 stream_.deviceBuffer = 0;
4260 }
4261
4262 // update stream state
4263 stream_.state = STREAM_CLOSED;
4264 }
4265
4266 //-----------------------------------------------------------------------------
4267
startStream(void)4268 void RtApiWasapi::startStream( void )
4269 {
4270 verifyStream();
4271
4272 if ( stream_.state == STREAM_RUNNING ) {
4273 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4274 error( RtAudioError::WARNING );
4275 return;
4276 }
4277
4278 // update stream state
4279 stream_.state = STREAM_RUNNING;
4280
4281 // create WASAPI stream thread
4282 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4283
4284 if ( !stream_.callbackInfo.thread ) {
4285 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4286 error( RtAudioError::THREAD_ERROR );
4287 }
4288 else {
4289 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4290 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4291 }
4292 }
4293
4294 //-----------------------------------------------------------------------------
4295
stopStream(void)4296 void RtApiWasapi::stopStream( void )
4297 {
4298 verifyStream();
4299
4300 if ( stream_.state == STREAM_STOPPED ) {
4301 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4302 error( RtAudioError::WARNING );
4303 return;
4304 }
4305
4306 // inform stream thread by setting stream state to STREAM_STOPPING
4307 stream_.state = STREAM_STOPPING;
4308
4309 // wait until stream thread is stopped
4310 while( stream_.state != STREAM_STOPPED ) {
4311 Sleep( 1 );
4312 }
4313
4314 // Wait for the last buffer to play before stopping.
4315 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4316
4317 // stop capture client if applicable
4318 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4319 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4320 if ( FAILED( hr ) ) {
4321 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4322 error( RtAudioError::DRIVER_ERROR );
4323 return;
4324 }
4325 }
4326
4327 // stop render client if applicable
4328 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4329 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4330 if ( FAILED( hr ) ) {
4331 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4332 error( RtAudioError::DRIVER_ERROR );
4333 return;
4334 }
4335 }
4336
4337 // close thread handle
4338 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4339 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4340 error( RtAudioError::THREAD_ERROR );
4341 return;
4342 }
4343
4344 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4345 }
4346
4347 //-----------------------------------------------------------------------------
4348
void RtApiWasapi::abortStream( void )
{
  // Halts the stream as quickly as possible. Identical to stopStream()
  // except that it does NOT sleep for one buffer period to let the last
  // buffer finish playing, so pending audio may be cut off.
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  // (wasapiThread() polls this flag at the top of its processing loop and
  // sets the state to STREAM_STOPPED on exit).
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle (the thread has already exited; this only releases
  // the Win32 handle obtained from CreateThread)
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4396
4397 //-----------------------------------------------------------------------------
4398
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4399 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4400 unsigned int firstChannel, unsigned int sampleRate,
4401 RtAudioFormat format, unsigned int* bufferSize,
4402 RtAudio::StreamOptions* options )
4403 {
4404 bool methodResult = FAILURE;
4405 unsigned int captureDeviceCount = 0;
4406 unsigned int renderDeviceCount = 0;
4407
4408 IMMDeviceCollection* captureDevices = NULL;
4409 IMMDeviceCollection* renderDevices = NULL;
4410 IMMDevice* devicePtr = NULL;
4411 WAVEFORMATEX* deviceFormat = NULL;
4412 unsigned int bufferBytes;
4413 stream_.state = STREAM_STOPPED;
4414 RtAudio::DeviceInfo deviceInfo;
4415
4416 // create API Handle if not already created
4417 if ( !stream_.apiHandle )
4418 stream_.apiHandle = ( void* ) new WasapiHandle();
4419
4420 // Count capture devices
4421 errorText_.clear();
4422 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4423 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4424 if ( FAILED( hr ) ) {
4425 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4426 goto Exit;
4427 }
4428
4429 hr = captureDevices->GetCount( &captureDeviceCount );
4430 if ( FAILED( hr ) ) {
4431 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4432 goto Exit;
4433 }
4434
4435 // Count render devices
4436 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4439 goto Exit;
4440 }
4441
4442 hr = renderDevices->GetCount( &renderDeviceCount );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4445 goto Exit;
4446 }
4447
4448 // validate device index
4449 if ( device >= captureDeviceCount + renderDeviceCount ) {
4450 errorType = RtAudioError::INVALID_USE;
4451 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4452 goto Exit;
4453 }
4454
4455 deviceInfo = getDeviceInfo( device );
4456
4457 // validate sample rate
4458 if ( sampleRate != deviceInfo.preferredSampleRate )
4459 {
4460 errorType = RtAudioError::INVALID_USE;
4461 std::stringstream ss;
4462 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4463 << "Hz sample rate not supported. This device only supports "
4464 << deviceInfo.preferredSampleRate << "Hz.";
4465 errorText_ = ss.str();
4466 goto Exit;
4467 }
4468
4469 // determine whether index falls within capture or render devices
4470 if ( device >= renderDeviceCount ) {
4471 if ( mode != INPUT ) {
4472 errorType = RtAudioError::INVALID_USE;
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4474 goto Exit;
4475 }
4476
4477 // retrieve captureAudioClient from devicePtr
4478 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4479
4480 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4481 if ( FAILED( hr ) ) {
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4483 goto Exit;
4484 }
4485
4486 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4487 NULL, ( void** ) &captureAudioClient );
4488 if ( FAILED( hr ) ) {
4489 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4490 goto Exit;
4491 }
4492
4493 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4494 if ( FAILED( hr ) ) {
4495 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4496 goto Exit;
4497 }
4498
4499 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4500 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4501 }
4502 else {
4503 if ( mode != OUTPUT ) {
4504 errorType = RtAudioError::INVALID_USE;
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4506 goto Exit;
4507 }
4508
4509 // retrieve renderAudioClient from devicePtr
4510 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4511
4512 hr = renderDevices->Item( device, &devicePtr );
4513 if ( FAILED( hr ) ) {
4514 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4515 goto Exit;
4516 }
4517
4518 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4519 NULL, ( void** ) &renderAudioClient );
4520 if ( FAILED( hr ) ) {
4521 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4522 goto Exit;
4523 }
4524
4525 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4526 if ( FAILED( hr ) ) {
4527 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4528 goto Exit;
4529 }
4530
4531 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4532 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4533 }
4534
4535 // fill stream data
4536 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4537 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4538 stream_.mode = DUPLEX;
4539 }
4540 else {
4541 stream_.mode = mode;
4542 }
4543
4544 stream_.device[mode] = device;
4545 stream_.doByteSwap[mode] = false;
4546 stream_.sampleRate = sampleRate;
4547 stream_.bufferSize = *bufferSize;
4548 stream_.nBuffers = 1;
4549 stream_.nUserChannels[mode] = channels;
4550 stream_.channelOffset[mode] = firstChannel;
4551 stream_.userFormat = format;
4552 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4553
4554 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4555 stream_.userInterleaved = false;
4556 else
4557 stream_.userInterleaved = true;
4558 stream_.deviceInterleaved[mode] = true;
4559
4560 // Set flags for buffer conversion.
4561 stream_.doConvertBuffer[mode] = false;
4562 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4563 stream_.nUserChannels != stream_.nDeviceChannels )
4564 stream_.doConvertBuffer[mode] = true;
4565 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4566 stream_.nUserChannels[mode] > 1 )
4567 stream_.doConvertBuffer[mode] = true;
4568
4569 if ( stream_.doConvertBuffer[mode] )
4570 setConvertInfo( mode, 0 );
4571
4572 // Allocate necessary internal buffers
4573 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4574
4575 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4576 if ( !stream_.userBuffer[mode] ) {
4577 errorType = RtAudioError::MEMORY_ERROR;
4578 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4579 goto Exit;
4580 }
4581
4582 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4583 stream_.callbackInfo.priority = 15;
4584 else
4585 stream_.callbackInfo.priority = 0;
4586
4587 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4588 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4589
4590 methodResult = SUCCESS;
4591
4592 Exit:
4593 //clean up
4594 SAFE_RELEASE( captureDevices );
4595 SAFE_RELEASE( renderDevices );
4596 SAFE_RELEASE( devicePtr );
4597 CoTaskMemFree( deviceFormat );
4598
4599 // if method failed, close the stream
4600 if ( methodResult == FAILURE )
4601 closeStream();
4602
4603 if ( !errorText_.empty() )
4604 error( errorType );
4605 return methodResult;
4606 }
4607
4608 //=============================================================================
4609
runWasapiThread(void * wasapiPtr)4610 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4611 {
4612 if ( wasapiPtr )
4613 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4614
4615 return 0;
4616 }
4617
stopWasapiThread(void * wasapiPtr)4618 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4619 {
4620 if ( wasapiPtr )
4621 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4622
4623 return 0;
4624 }
4625
abortWasapiThread(void * wasapiPtr)4626 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4627 {
4628 if ( wasapiPtr )
4629 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4630
4631 return 0;
4632 }
4633
4634 //-----------------------------------------------------------------------------
4635
wasapiThread()4636 void RtApiWasapi::wasapiThread()
4637 {
4638 // as this is a new thread, we must CoInitialize it
4639 CoInitialize( NULL );
4640
4641 HRESULT hr;
4642
4643 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4644 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4645 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4646 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4647 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4648 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4649
4650 WAVEFORMATEX* captureFormat = NULL;
4651 WAVEFORMATEX* renderFormat = NULL;
4652 WasapiBuffer captureBuffer;
4653 WasapiBuffer renderBuffer;
4654
4655 // declare local stream variables
4656 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4657 BYTE* streamBuffer = NULL;
4658 unsigned long captureFlags = 0;
4659 unsigned int bufferFrameCount = 0;
4660 unsigned int numFramesPadding = 0;
4661 bool callbackPushed = false;
4662 bool callbackPulled = false;
4663 bool callbackStopped = false;
4664 int callbackResult = 0;
4665
4666 unsigned int deviceBuffSize = 0;
4667
4668 errorText_.clear();
4669 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4670
4671 // Attempt to assign "Pro Audio" characteristic to thread
4672 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4673 if ( AvrtDll ) {
4674 DWORD taskIndex = 0;
4675 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4676 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4677 FreeLibrary( AvrtDll );
4678 }
4679
4680 // start capture stream if applicable
4681 if ( captureAudioClient ) {
4682 hr = captureAudioClient->GetMixFormat( &captureFormat );
4683 if ( FAILED( hr ) ) {
4684 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4685 goto Exit;
4686 }
4687
4688 // initialize capture stream according to desire buffer size
4689 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4690
4691 if ( !captureClient ) {
4692 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4693 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4694 desiredBufferPeriod,
4695 desiredBufferPeriod,
4696 captureFormat,
4697 NULL );
4698 if ( FAILED( hr ) ) {
4699 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4700 goto Exit;
4701 }
4702
4703 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4704 ( void** ) &captureClient );
4705 if ( FAILED( hr ) ) {
4706 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4707 goto Exit;
4708 }
4709
4710 // configure captureEvent to trigger on every available capture buffer
4711 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4712 if ( !captureEvent ) {
4713 errorType = RtAudioError::SYSTEM_ERROR;
4714 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4715 goto Exit;
4716 }
4717
4718 hr = captureAudioClient->SetEventHandle( captureEvent );
4719 if ( FAILED( hr ) ) {
4720 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4721 goto Exit;
4722 }
4723
4724 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4725 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4726 }
4727
4728 unsigned int inBufferSize = 0;
4729 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4730 if ( FAILED( hr ) ) {
4731 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4732 goto Exit;
4733 }
4734
4735 // scale outBufferSize according to stream->user sample rate ratio
4736 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4737 inBufferSize *= stream_.nDeviceChannels[INPUT];
4738
4739 // set captureBuffer size
4740 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4741
4742 // reset the capture stream
4743 hr = captureAudioClient->Reset();
4744 if ( FAILED( hr ) ) {
4745 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4746 goto Exit;
4747 }
4748
4749 // start the capture stream
4750 hr = captureAudioClient->Start();
4751 if ( FAILED( hr ) ) {
4752 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4753 goto Exit;
4754 }
4755 }
4756
4757 // start render stream if applicable
4758 if ( renderAudioClient ) {
4759 hr = renderAudioClient->GetMixFormat( &renderFormat );
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4762 goto Exit;
4763 }
4764
4765 // initialize render stream according to desire buffer size
4766 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4767
4768 if ( !renderClient ) {
4769 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4770 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4771 desiredBufferPeriod,
4772 desiredBufferPeriod,
4773 renderFormat,
4774 NULL );
4775 if ( FAILED( hr ) ) {
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4777 goto Exit;
4778 }
4779
4780 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4781 ( void** ) &renderClient );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4784 goto Exit;
4785 }
4786
4787 // configure renderEvent to trigger on every available render buffer
4788 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4789 if ( !renderEvent ) {
4790 errorType = RtAudioError::SYSTEM_ERROR;
4791 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4792 goto Exit;
4793 }
4794
4795 hr = renderAudioClient->SetEventHandle( renderEvent );
4796 if ( FAILED( hr ) ) {
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4798 goto Exit;
4799 }
4800
4801 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4802 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4803 }
4804
4805 unsigned int outBufferSize = 0;
4806 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4809 goto Exit;
4810 }
4811
4812 // scale inBufferSize according to user->stream sample rate ratio
4813 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4814 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4815
4816 // set renderBuffer size
4817 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4818
4819 // reset the render stream
4820 hr = renderAudioClient->Reset();
4821 if ( FAILED( hr ) ) {
4822 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4823 goto Exit;
4824 }
4825
4826 // start the render stream
4827 hr = renderAudioClient->Start();
4828 if ( FAILED( hr ) ) {
4829 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4830 goto Exit;
4831 }
4832 }
4833
4834 if ( stream_.mode == INPUT ) {
4835 using namespace std; // for roundf
4836 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4837 }
4838 else if ( stream_.mode == OUTPUT ) {
4839 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4840 }
4841 else if ( stream_.mode == DUPLEX ) {
4842 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4843 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4844 }
4845
4846 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4847 if ( !stream_.deviceBuffer ) {
4848 errorType = RtAudioError::MEMORY_ERROR;
4849 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4850 goto Exit;
4851 }
4852
4853 // stream process loop
4854 while ( stream_.state != STREAM_STOPPING ) {
4855 if ( !callbackPulled ) {
4856 // Callback Input
4857 // ==============
4858 // 1. Pull callback buffer from inputBuffer
4859 // 2. If 1. was successful: Convert callback buffer to user format
4860
4861 if ( captureAudioClient ) {
4862 // Pull callback buffer from inputBuffer
4863 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4864 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4865 stream_.deviceFormat[INPUT] );
4866
4867 if ( callbackPulled ) {
4868 if ( stream_.doConvertBuffer[INPUT] ) {
4869 // Convert callback buffer to user format
4870 convertBuffer( stream_.userBuffer[INPUT],
4871 stream_.deviceBuffer,
4872 stream_.convertInfo[INPUT] );
4873 }
4874 else {
4875 // no further conversion, simple copy deviceBuffer to userBuffer
4876 memcpy( stream_.userBuffer[INPUT],
4877 stream_.deviceBuffer,
4878 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4879 }
4880 }
4881 }
4882 else {
4883 // if there is no capture stream, set callbackPulled flag
4884 callbackPulled = true;
4885 }
4886
4887 // Execute Callback
4888 // ================
4889 // 1. Execute user callback method
4890 // 2. Handle return value from callback
4891
4892 // if callback has not requested the stream to stop
4893 if ( callbackPulled && !callbackStopped ) {
4894 // Execute user callback method
4895 callbackResult = callback( stream_.userBuffer[OUTPUT],
4896 stream_.userBuffer[INPUT],
4897 stream_.bufferSize,
4898 getStreamTime(),
4899 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4900 stream_.callbackInfo.userData );
4901
4902 // Handle return value from callback
4903 if ( callbackResult == 1 ) {
4904 // instantiate a thread to stop this thread
4905 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4906 if ( !threadHandle ) {
4907 errorType = RtAudioError::THREAD_ERROR;
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4909 goto Exit;
4910 }
4911 else if ( !CloseHandle( threadHandle ) ) {
4912 errorType = RtAudioError::THREAD_ERROR;
4913 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4914 goto Exit;
4915 }
4916
4917 callbackStopped = true;
4918 }
4919 else if ( callbackResult == 2 ) {
4920 // instantiate a thread to stop this thread
4921 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4922 if ( !threadHandle ) {
4923 errorType = RtAudioError::THREAD_ERROR;
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4925 goto Exit;
4926 }
4927 else if ( !CloseHandle( threadHandle ) ) {
4928 errorType = RtAudioError::THREAD_ERROR;
4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4930 goto Exit;
4931 }
4932
4933 callbackStopped = true;
4934 }
4935 }
4936 }
4937
4938 // Callback Output
4939 // ===============
4940 // 1. Convert callback buffer to stream format
4941 // 2. Push callback buffer into outputBuffer
4942
4943 if ( renderAudioClient && callbackPulled ) {
4944 if ( stream_.doConvertBuffer[OUTPUT] ) {
4945 // Convert callback buffer to stream format
4946 convertBuffer( stream_.deviceBuffer,
4947 stream_.userBuffer[OUTPUT],
4948 stream_.convertInfo[OUTPUT] );
4949
4950 }
4951
4952 // Push callback buffer into outputBuffer
4953 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
4954 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
4955 stream_.deviceFormat[OUTPUT] );
4956 }
4957 else {
4958 // if there is no render stream, set callbackPushed flag
4959 callbackPushed = true;
4960 }
4961
4962 // Stream Capture
4963 // ==============
4964 // 1. Get capture buffer from stream
4965 // 2. Push capture buffer into inputBuffer
4966 // 3. If 2. was successful: Release capture buffer
4967
4968 if ( captureAudioClient ) {
4969 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4970 if ( !callbackPulled ) {
4971 WaitForSingleObject( captureEvent, INFINITE );
4972 }
4973
4974 // Get capture buffer from stream
4975 hr = captureClient->GetBuffer( &streamBuffer,
4976 &bufferFrameCount,
4977 &captureFlags, NULL, NULL );
4978 if ( FAILED( hr ) ) {
4979 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4980 goto Exit;
4981 }
4982
4983 if ( bufferFrameCount != 0 ) {
4984 // Push capture buffer into inputBuffer
4985 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4986 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4987 stream_.deviceFormat[INPUT] ) )
4988 {
4989 // Release capture buffer
4990 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4991 if ( FAILED( hr ) ) {
4992 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4993 goto Exit;
4994 }
4995 }
4996 else
4997 {
4998 // Inform WASAPI that capture was unsuccessful
4999 hr = captureClient->ReleaseBuffer( 0 );
5000 if ( FAILED( hr ) ) {
5001 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5002 goto Exit;
5003 }
5004 }
5005 }
5006 else
5007 {
5008 // Inform WASAPI that capture was unsuccessful
5009 hr = captureClient->ReleaseBuffer( 0 );
5010 if ( FAILED( hr ) ) {
5011 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5012 goto Exit;
5013 }
5014 }
5015 }
5016
5017 // Stream Render
5018 // =============
5019 // 1. Get render buffer from stream
5020 // 2. Pull next buffer from outputBuffer
5021 // 3. If 2. was successful: Fill render buffer with next buffer
5022 // Release render buffer
5023
5024 if ( renderAudioClient ) {
5025 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5026 if ( callbackPulled && !callbackPushed ) {
5027 WaitForSingleObject( renderEvent, INFINITE );
5028 }
5029
5030 // Get render buffer from stream
5031 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5032 if ( FAILED( hr ) ) {
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5034 goto Exit;
5035 }
5036
5037 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5038 if ( FAILED( hr ) ) {
5039 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5040 goto Exit;
5041 }
5042
5043 bufferFrameCount -= numFramesPadding;
5044
5045 if ( bufferFrameCount != 0 ) {
5046 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5047 if ( FAILED( hr ) ) {
5048 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5049 goto Exit;
5050 }
5051
5052 // Pull next buffer from outputBuffer
5053 // Fill render buffer with next buffer
5054 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5055 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5056 stream_.deviceFormat[OUTPUT] ) )
5057 {
5058 // Release render buffer
5059 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5060 if ( FAILED( hr ) ) {
5061 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5062 goto Exit;
5063 }
5064 }
5065 else
5066 {
5067 // Inform WASAPI that render was unsuccessful
5068 hr = renderClient->ReleaseBuffer( 0, 0 );
5069 if ( FAILED( hr ) ) {
5070 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5071 goto Exit;
5072 }
5073 }
5074 }
5075 else
5076 {
5077 // Inform WASAPI that render was unsuccessful
5078 hr = renderClient->ReleaseBuffer( 0, 0 );
5079 if ( FAILED( hr ) ) {
5080 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5081 goto Exit;
5082 }
5083 }
5084 }
5085
5086 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5087 if ( callbackPushed ) {
5088 callbackPulled = false;
5089 // tick stream time
5090 RtApi::tickStreamTime();
5091 }
5092
5093 }
5094
5095 Exit:
5096 // clean up
5097 CoTaskMemFree( captureFormat );
5098 CoTaskMemFree( renderFormat );
5099
5100 CoUninitialize();
5101
5102 // update stream state
5103 stream_.state = STREAM_STOPPED;
5104
5105 if ( errorText_.empty() )
5106 return;
5107 else
5108 error( errorType );
5109 }
5110
5111 //******************** End of __WINDOWS_WASAPI__ *********************//
5112 #endif
5113
5114
5115 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5116
5117 // Modified by Robin Davies, October 2005
5118 // - Improvements to DirectX pointer chasing.
5119 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5120 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5121 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5122 // Changed device query structure for RtAudio 4.0.7, January 2010
5123
5124 #include <mmsystem.h>
5125 #include <mmreg.h>
5126 #include <dsound.h>
5127 #include <assert.h>
5128 #include <algorithm>
5129
5130 #if defined(__MINGW32__)
5131 // missing from latest mingw winapi
5132 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5133 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5134 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5135 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5136 #endif
5137
5138 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5139
5140 #ifdef _MSC_VER // if Microsoft Visual C++
5141 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5142 #endif
5143
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5144 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5145 {
5146 if ( pointer > bufferSize ) pointer -= bufferSize;
5147 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5148 if ( pointer < earlierPointer ) pointer += bufferSize;
5149 return pointer >= earlierPointer && pointer < laterPointer;
5150 }
5151
5152 // A structure to hold various information related to the DirectSound
5153 // API implementation.
5154 struct DsHandle {
5155 unsigned int drainCounter; // Tracks callback counts when draining
5156 bool internalDrain; // Indicates if stop is initiated from callback or not.
5157 void *id[2];
5158 void *buffer[2];
5159 bool xrun[2];
5160 UINT bufferPointer[2];
5161 DWORD dsBufferSize[2];
5162 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5163 HANDLE condition;
5164
DsHandleDsHandle5165 DsHandle()
5166 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5167 };
5168
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.

// Enumeration callback handed to DirectSound(Capture)Enumerate(); the
// lpContext argument carries a DsProbeData pointer (see below).
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR module,
                                          LPVOID lpContext );

// Maps a DirectSound HRESULT error code to a human-readable string.
static const char* getErrorString( int code );

// Thread entry point used with _beginthreadex() for the callback thread.
static unsigned __stdcall callbackHandler( void *ptr );
5179
5180 struct DsDevice {
5181 LPGUID id[2];
5182 bool validId[2];
5183 bool found;
5184 std::string name;
5185
DsDeviceDsDevice5186 DsDevice()
5187 : found(false) { validId[0] = false; validId[1] = false; }
5188 };
5189
// Context structure passed (as lpContext) to deviceQueryCallback during
// DirectSound / DirectSoundCapture enumeration.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // device list to append to / update
};
5194
RtApiDs()5195 RtApiDs :: RtApiDs()
5196 {
5197 // Dsound will run both-threaded. If CoInitialize fails, then just
5198 // accept whatever the mainline chose for a threading model.
5199 coInitialized_ = false;
5200 HRESULT hr = CoInitialize( NULL );
5201 if ( !FAILED( hr ) ) coInitialized_ = true;
5202 }
5203
~RtApiDs()5204 RtApiDs :: ~RtApiDs()
5205 {
5206 if ( stream_.state != STREAM_CLOSED ) closeStream();
5207 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5208 }
5209
5210 // The DirectSound default output is always the first device.
getDefaultOutputDevice(void)5211 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5212 {
5213 return 0;
5214 }
5215
5216 // The DirectSound default input is always the first input device,
5217 // which is the first capture device enumerated.
getDefaultInputDevice(void)5218 unsigned int RtApiDs :: getDefaultInputDevice( void )
5219 {
5220 return 0;
5221 }
5222
getDeviceCount(void)5223 unsigned int RtApiDs :: getDeviceCount( void )
5224 {
5225 // Set query flag for previously found devices to false, so that we
5226 // can check for any devices that have disappeared.
5227 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5228 dsDevices[i].found = false;
5229
5230 // Query DirectSound devices.
5231 struct DsProbeData probeInfo;
5232 probeInfo.isInput = false;
5233 probeInfo.dsDevices = &dsDevices;
5234 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5235 if ( FAILED( result ) ) {
5236 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5237 errorText_ = errorStream_.str();
5238 error( RtAudioError::WARNING );
5239 }
5240
5241 // Query DirectSoundCapture devices.
5242 probeInfo.isInput = true;
5243 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5244 if ( FAILED( result ) ) {
5245 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5246 errorText_ = errorStream_.str();
5247 error( RtAudioError::WARNING );
5248 }
5249
5250 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5251 for ( unsigned int i=0; i<dsDevices.size(); ) {
5252 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5253 else i++;
5254 }
5255
5256 return static_cast<unsigned int>(dsDevices.size());
5257 }
5258
// Probe capability information (channels, sample rates, native formats)
// for the DirectSound device at the given index. Returns a DeviceInfo
// whose 'probed' field is false if any fatal error occurred. Note the
// goto-based control flow: output probing falls through (or jumps) to
// input probing at the probeInput label.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;  // stays false on every early error return

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // Skip the output probe entirely when this device has no output GUID.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  if ( dsDevices[ device ].validId[1] == false ) {
    // Output-only device: nothing more to probe.
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information. The WAVE_FORMAT_* capability
  // bits encode both the rate (1x/2x/4x/96) and sample width (08/16), so
  // formats and rates are decoded together, stereo vs. mono separately.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5437
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on the
// given device: creates the DS (capture) object and secondary buffer,
// allocates conversion buffers and the DsHandle, and starts the callback
// thread on the first call. Returns SUCCESS or FAILURE; on failure the
// 'error' label releases everything allocated so far. For duplex
// streams this is called twice (first OUTPUT, then INPUT).
bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                 unsigned int firstChannel, unsigned int sampleRate,
                                 RtAudioFormat format, unsigned int *bufferSize,
                                 RtAudio::StreamOptions *options )
{
  if ( channels + firstChannel > 2 ) {
    errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
    return FAILURE;
  }

  size_t nDevices = dsDevices.size();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
    return FAILURE;
  }

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
    return FAILURE;
  }

  if ( mode == OUTPUT ) {
    if ( dsDevices[ device ].validId[0] == false ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }
  else { // mode == INPUT
    if ( dsDevices[ device ].validId[1] == false ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // According to a note in PortAudio, using GetDesktopWindow()
  // instead of GetForegroundWindow() is supposed to avoid problems
  // that occur when the application's window is not the foreground
  // window.  Also, if the application window closes before the
  // DirectSound buffer, DirectSound can crash.  In the past, I had
  // problems when using GetDesktopWindow() but it seems fine now
  // (January 2010).  I'll leave it commented here.
  // HWND hWnd = GetForegroundWindow();
  HWND hWnd = GetDesktopWindow();

  // Check the numberOfBuffers parameter and limit the lowest value to
  // two.  This is a judgement call and a value of two is probably too
  // low for capture, but it should work for playback.
  int nBuffers = 0;
  if ( options ) nBuffers = options->numberOfBuffers;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
  if ( nBuffers < 2 ) nBuffers = 3;

  // Check the lower range of the user-specified buffer size and set
  // (arbitrarily) to a lower bound of 32.
  if ( *bufferSize < 32 ) *bufferSize = 32;

  // Create the wave format structure.  The data format setting will
  // be determined later.
  WAVEFORMATEX waveFormat;
  ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
  waveFormat.wFormatTag = WAVE_FORMAT_PCM;
  waveFormat.nChannels = channels + firstChannel;
  waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

  // Determine the device buffer size. By default, we'll use the value
  // defined above (32K), but we will grow it to make allowances for
  // very large software buffer sizes.
  DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
  DWORD dsPointerLeadTime = 0;

  void *ohandle = 0, *bhandle = 0;
  HRESULT result;
  if ( mode == OUTPUT ) {

    LPDIRECTSOUND output;
    result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    DSCAPS outCaps;
    outCaps.dwSize = sizeof( outCaps );
    result = output->GetCaps( &outCaps );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Check channel information.
    // NOTE(review): this message is prefixed "RtApiDs::getDeviceInfo"
    // although we are in probeDeviceOpen — looks like a copy/paste slip.
    if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
      errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Check format information.  Use 16-bit format unless not
    // supported or user requests 8-bit.
    if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
         !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
      waveFormat.wBitsPerSample = 16;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    }
    else {
      waveFormat.wBitsPerSample = 8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    }
    stream_.userFormat = format;

    // Update wave format structure and buffer information.
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
    while ( dsPointerLeadTime * 2U > dsBufferSize )
      dsBufferSize *= 2;

    // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
    // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
    // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
    result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Even though we will write to the secondary buffer, we need to
    // access the primary buffer to set the correct output format
    // (since the default is 8-bit, 22 kHz!).  Setup the DS primary
    // buffer description.
    DSBUFFERDESC bufferDescription;
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
    bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

    // Obtain the primary buffer
    LPDIRECTSOUNDBUFFER buffer;
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Set the primary DS buffer sound format.
    result = buffer->SetFormat( &waveFormat );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Setup the secondary DS buffer description.
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
    bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
                                  DSBCAPS_GLOBALFOCUS |
                                  DSBCAPS_GETCURRENTPOSITION2 |
                                  DSBCAPS_LOCHARDWARE );  // Force hardware mixing
    bufferDescription.dwBufferBytes = dsBufferSize;
    bufferDescription.lpwfxFormat = &waveFormat;

    // Try to create the secondary DS buffer.  If that doesn't work,
    // try to use software mixing.  Otherwise, there's a problem.
    // NOTE(review): 'buffer' (the primary buffer pointer) is overwritten
    // here without a Release() — confirm the primary buffer interface is
    // not leaked by this reuse.
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
                                    DSBCAPS_GLOBALFOCUS |
                                    DSBCAPS_GETCURRENTPOSITION2 |
                                    DSBCAPS_LOCSOFTWARE );  // Force software mixing
      result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
      if ( FAILED( result ) ) {
        output->Release();
        errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
    }

    // Get the buffer size ... might be different from what we specified.
    DSBCAPS dsbcaps;
    dsbcaps.dwSize = sizeof( DSBCAPS );
    result = buffer->GetCaps( &dsbcaps );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    dsBufferSize = dsbcaps.dwBufferBytes;

    // Lock the DS buffer
    LPVOID audioPtr;
    DWORD dataLen;
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Stash the raw interface pointers for storage in the DsHandle below.
    ohandle = (void *) output;
    bhandle = (void *) buffer;
  }

  if ( mode == INPUT ) {

    LPDIRECTSOUNDCAPTURE input;
    result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    DSCCAPS inCaps;
    inCaps.dwSize = sizeof( inCaps );
    result = input->GetCaps( &inCaps );
    if ( FAILED( result ) ) {
      input->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Check channel information.
    // NOTE(review): message prefix says "RtApiDs::getDeviceInfo" — same
    // copy/paste slip as in the output branch above.
    if ( inCaps.dwChannels < channels + firstChannel ) {
      errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
      return FAILURE;
    }

    // Check format information.  Use 16-bit format unless user
    // requests 8-bit.
    DWORD deviceFormats;
    if ( channels + firstChannel == 2 ) {
      deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
        waveFormat.wBitsPerSample = 8;
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
      }
      else { // assume 16-bit is supported
        waveFormat.wBitsPerSample = 16;
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      }
    }
    else { // channel == 1
      deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
        waveFormat.wBitsPerSample = 8;
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
      }
      else { // assume 16-bit is supported
        waveFormat.wBitsPerSample = 16;
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      }
    }
    stream_.userFormat = format;

    // Update wave format structure and buffer information.
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
    while ( dsPointerLeadTime * 2U > dsBufferSize )
      dsBufferSize *= 2;

    // Setup the secondary DS buffer description.
    DSCBUFFERDESC bufferDescription;
    ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
    bufferDescription.dwFlags = 0;
    bufferDescription.dwReserved = 0;
    bufferDescription.dwBufferBytes = dsBufferSize;
    bufferDescription.lpwfxFormat = &waveFormat;

    // Create the capture buffer.
    LPDIRECTSOUNDCAPTUREBUFFER buffer;
    result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      input->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Get the buffer size ... might be different from what we specified.
    DSCBCAPS dscbcaps;
    dscbcaps.dwSize = sizeof( DSCBCAPS );
    result = buffer->GetCaps( &dscbcaps );
    if ( FAILED( result ) ) {
      input->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    dsBufferSize = dscbcaps.dwBufferBytes;

    // NOTE: We could have a problem here if this is a duplex stream
    // and the play and capture hardware buffer sizes are different
    // (I'm actually not sure if that is a problem or not).
    // Currently, we are not verifying that.

    // Lock the capture buffer
    LPVOID audioPtr;
    DWORD dataLen;
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      input->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Zero the buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      input->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Stash the raw interface pointers for storage in the DsHandle below.
    ohandle = (void *) input;
    bhandle = (void *) buffer;
  }

  // Set various stream parameters
  DsHandle *handle = 0;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.nUserChannels[mode] = channels;
  stream_.bufferSize = *bufferSize;
  stream_.channelOffset[mode] = firstChannel;
  stream_.deviceInterleaved[mode] = true;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Set flag for buffer conversion: needed whenever user and device
  // differ in channel count, sample format, or interleaving.
  stream_.doConvertBuffer[mode] = false;
  if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
    stream_.doConvertBuffer[mode] = true;
  if (stream_.userFormat != stream_.deviceFormat[mode])
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex streams, reuse the output-side device buffer if it is
      // already large enough for the input side.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  // Allocate our DsHandle structures for the stream.
  if ( stream_.apiHandle == 0 ) {
    try {
      handle = new DsHandle;
    }
    catch ( std::bad_alloc& ) {
      // NOTE(review): message says "AsioHandle" — copy/paste from the
      // ASIO backend; the type allocated here is DsHandle.
      errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
      goto error;
    }

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
  }
  else
    handle = (DsHandle *) stream_.apiHandle;
  handle->id[mode] = ohandle;
  handle->buffer[mode] = bhandle;
  handle->dsBufferSize[mode] = dsBufferSize;
  handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
  else
    stream_.mode = mode;
  stream_.nBuffers = nBuffers;
  stream_.sampleRate = sampleRate;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup the callback thread.
  if ( stream_.callbackInfo.isRunning == false ) {
    unsigned threadId;
    stream_.callbackInfo.isRunning = true;
    stream_.callbackInfo.object = (void *) this;
    stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
                                                  &stream_.callbackInfo, 0, &threadId );
    if ( stream_.callbackInfo.thread == 0 ) {
      errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
      goto error;
    }

    // Boost DS thread priority
    SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
  }
  return SUCCESS;

  // Failure path: release every COM interface, buffer, and allocation
  // acquired above, then mark the stream closed.
 error:
  if ( handle ) {
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
      if ( buffer ) buffer->Release();
      object->Release();
    }
    if ( handle->buffer[1] ) {
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
      if ( buffer ) buffer->Release();
      object->Release();
    }
    CloseHandle( handle->condition );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
5939
closeStream()5940 void RtApiDs :: closeStream()
5941 {
5942 if ( stream_.state == STREAM_CLOSED ) {
5943 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5944 error( RtAudioError::WARNING );
5945 return;
5946 }
5947
5948 // Stop the callback thread.
5949 stream_.callbackInfo.isRunning = false;
5950 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5951 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5952
5953 DsHandle *handle = (DsHandle *) stream_.apiHandle;
5954 if ( handle ) {
5955 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5956 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5957 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5958 if ( buffer ) {
5959 buffer->Stop();
5960 buffer->Release();
5961 }
5962 object->Release();
5963 }
5964 if ( handle->buffer[1] ) {
5965 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5966 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5967 if ( buffer ) {
5968 buffer->Stop();
5969 buffer->Release();
5970 }
5971 object->Release();
5972 }
5973 CloseHandle( handle->condition );
5974 delete handle;
5975 stream_.apiHandle = 0;
5976 }
5977
5978 for ( int i=0; i<2; i++ ) {
5979 if ( stream_.userBuffer[i] ) {
5980 free( stream_.userBuffer[i] );
5981 stream_.userBuffer[i] = 0;
5982 }
5983 }
5984
5985 if ( stream_.deviceBuffer ) {
5986 free( stream_.deviceBuffer );
5987 stream_.deviceBuffer = 0;
5988 }
5989
5990 stream_.mode = UNINITIALIZED;
5991 stream_.state = STREAM_CLOSED;
5992 }
5993
startStream()5994 void RtApiDs :: startStream()
5995 {
5996 verifyStream();
5997 if ( stream_.state == STREAM_RUNNING ) {
5998 errorText_ = "RtApiDs::startStream(): the stream is already running!";
5999 error( RtAudioError::WARNING );
6000 return;
6001 }
6002
6003 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6004
6005 // Increase scheduler frequency on lesser windows (a side-effect of
6006 // increasing timer accuracy). On greater windows (Win2K or later),
6007 // this is already in effect.
6008 timeBeginPeriod( 1 );
6009
6010 buffersRolling = false;
6011 duplexPrerollBytes = 0;
6012
6013 if ( stream_.mode == DUPLEX ) {
6014 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6015 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6016 }
6017
6018 HRESULT result = 0;
6019 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6020
6021 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6022 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6023 if ( FAILED( result ) ) {
6024 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6025 errorText_ = errorStream_.str();
6026 goto unlock;
6027 }
6028 }
6029
6030 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6031
6032 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6033 result = buffer->Start( DSCBSTART_LOOPING );
6034 if ( FAILED( result ) ) {
6035 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6036 errorText_ = errorStream_.str();
6037 goto unlock;
6038 }
6039 }
6040
6041 handle->drainCounter = 0;
6042 handle->internalDrain = false;
6043 ResetEvent( handle->condition );
6044 stream_.state = STREAM_RUNNING;
6045
6046 unlock:
6047 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6048 }
6049
// Stop the stream, optionally letting queued output drain first, then
// zero both DirectSound buffers so a restart does not replay stale data.
// Locking is subtle here: the OUTPUT/DUPLEX branch takes the stream
// mutex; the INPUT branch takes it only when not in DUPLEX mode (it is
// already held then).  Every reachable path therefore holds the mutex
// by the time control reaches the `unlock:` label, which releases it.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter == 0 means no drain was requested yet: ask the
    // callback thread to drain (write silence) and wait until it
    // signals the condition event from callbackEvent().
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex is already held from the output branch
    // above; lock only for pure INPUT streams.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6153
abortStream()6154 void RtApiDs :: abortStream()
6155 {
6156 verifyStream();
6157 if ( stream_.state == STREAM_STOPPED ) {
6158 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6159 error( RtAudioError::WARNING );
6160 return;
6161 }
6162
6163 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6164 handle->drainCounter = 2;
6165
6166 stopStream();
6167 }
6168
callbackEvent()6169 void RtApiDs :: callbackEvent()
6170 {
6171 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6172 Sleep( 50 ); // sleep 50 milliseconds
6173 return;
6174 }
6175
6176 if ( stream_.state == STREAM_CLOSED ) {
6177 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6178 error( RtAudioError::WARNING );
6179 return;
6180 }
6181
6182 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6183 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6184
6185 // Check if we were draining the stream and signal is finished.
6186 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6187
6188 stream_.state = STREAM_STOPPING;
6189 if ( handle->internalDrain == false )
6190 SetEvent( handle->condition );
6191 else
6192 stopStream();
6193 return;
6194 }
6195
6196 // Invoke user callback to get fresh output data UNLESS we are
6197 // draining stream.
6198 if ( handle->drainCounter == 0 ) {
6199 RtAudioCallback callback = (RtAudioCallback) info->callback;
6200 double streamTime = getStreamTime();
6201 RtAudioStreamStatus status = 0;
6202 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6203 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6204 handle->xrun[0] = false;
6205 }
6206 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6207 status |= RTAUDIO_INPUT_OVERFLOW;
6208 handle->xrun[1] = false;
6209 }
6210 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6211 stream_.bufferSize, streamTime, status, info->userData );
6212 if ( cbReturnValue == 2 ) {
6213 stream_.state = STREAM_STOPPING;
6214 handle->drainCounter = 2;
6215 abortStream();
6216 return;
6217 }
6218 else if ( cbReturnValue == 1 ) {
6219 handle->drainCounter = 1;
6220 handle->internalDrain = true;
6221 }
6222 }
6223
6224 HRESULT result;
6225 DWORD currentWritePointer, safeWritePointer;
6226 DWORD currentReadPointer, safeReadPointer;
6227 UINT nextWritePointer;
6228
6229 LPVOID buffer1 = NULL;
6230 LPVOID buffer2 = NULL;
6231 DWORD bufferSize1 = 0;
6232 DWORD bufferSize2 = 0;
6233
6234 char *buffer;
6235 long bufferBytes;
6236
6237 MUTEX_LOCK( &stream_.mutex );
6238 if ( stream_.state == STREAM_STOPPED ) {
6239 MUTEX_UNLOCK( &stream_.mutex );
6240 return;
6241 }
6242
6243 if ( buffersRolling == false ) {
6244 if ( stream_.mode == DUPLEX ) {
6245 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6246
6247 // It takes a while for the devices to get rolling. As a result,
6248 // there's no guarantee that the capture and write device pointers
6249 // will move in lockstep. Wait here for both devices to start
6250 // rolling, and then set our buffer pointers accordingly.
6251 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6252 // bytes later than the write buffer.
6253
6254 // Stub: a serious risk of having a pre-emptive scheduling round
6255 // take place between the two GetCurrentPosition calls... but I'm
6256 // really not sure how to solve the problem. Temporarily boost to
6257 // Realtime priority, maybe; but I'm not sure what priority the
6258 // DirectSound service threads run at. We *should* be roughly
6259 // within a ms or so of correct.
6260
6261 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6262 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6263
6264 DWORD startSafeWritePointer, startSafeReadPointer;
6265
6266 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6267 if ( FAILED( result ) ) {
6268 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6269 errorText_ = errorStream_.str();
6270 MUTEX_UNLOCK( &stream_.mutex );
6271 error( RtAudioError::SYSTEM_ERROR );
6272 return;
6273 }
6274 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6275 if ( FAILED( result ) ) {
6276 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6277 errorText_ = errorStream_.str();
6278 MUTEX_UNLOCK( &stream_.mutex );
6279 error( RtAudioError::SYSTEM_ERROR );
6280 return;
6281 }
6282 while ( true ) {
6283 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6284 if ( FAILED( result ) ) {
6285 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6286 errorText_ = errorStream_.str();
6287 MUTEX_UNLOCK( &stream_.mutex );
6288 error( RtAudioError::SYSTEM_ERROR );
6289 return;
6290 }
6291 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6292 if ( FAILED( result ) ) {
6293 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6294 errorText_ = errorStream_.str();
6295 MUTEX_UNLOCK( &stream_.mutex );
6296 error( RtAudioError::SYSTEM_ERROR );
6297 return;
6298 }
6299 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6300 Sleep( 1 );
6301 }
6302
6303 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6304
6305 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6306 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6307 handle->bufferPointer[1] = safeReadPointer;
6308 }
6309 else if ( stream_.mode == OUTPUT ) {
6310
6311 // Set the proper nextWritePosition after initial startup.
6312 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6313 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6314 if ( FAILED( result ) ) {
6315 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6316 errorText_ = errorStream_.str();
6317 MUTEX_UNLOCK( &stream_.mutex );
6318 error( RtAudioError::SYSTEM_ERROR );
6319 return;
6320 }
6321 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6322 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6323 }
6324
6325 buffersRolling = true;
6326 }
6327
6328 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6329
6330 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6331
6332 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6333 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6334 bufferBytes *= formatBytes( stream_.userFormat );
6335 memset( stream_.userBuffer[0], 0, bufferBytes );
6336 }
6337
6338 // Setup parameters and do buffer conversion if necessary.
6339 if ( stream_.doConvertBuffer[0] ) {
6340 buffer = stream_.deviceBuffer;
6341 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6342 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6343 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6344 }
6345 else {
6346 buffer = stream_.userBuffer[0];
6347 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6348 bufferBytes *= formatBytes( stream_.userFormat );
6349 }
6350
6351 // No byte swapping necessary in DirectSound implementation.
6352
6353 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6354 // unsigned. So, we need to convert our signed 8-bit data here to
6355 // unsigned.
6356 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6357 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6358
6359 DWORD dsBufferSize = handle->dsBufferSize[0];
6360 nextWritePointer = handle->bufferPointer[0];
6361
6362 DWORD endWrite, leadPointer;
6363 while ( true ) {
6364 // Find out where the read and "safe write" pointers are.
6365 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6366 if ( FAILED( result ) ) {
6367 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6368 errorText_ = errorStream_.str();
6369 MUTEX_UNLOCK( &stream_.mutex );
6370 error( RtAudioError::SYSTEM_ERROR );
6371 return;
6372 }
6373
6374 // We will copy our output buffer into the region between
6375 // safeWritePointer and leadPointer. If leadPointer is not
6376 // beyond the next endWrite position, wait until it is.
6377 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6378 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6379 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6380 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6381 endWrite = nextWritePointer + bufferBytes;
6382
6383 // Check whether the entire write region is behind the play pointer.
6384 if ( leadPointer >= endWrite ) break;
6385
6386 // If we are here, then we must wait until the leadPointer advances
6387 // beyond the end of our next write region. We use the
6388 // Sleep() function to suspend operation until that happens.
6389 double millis = ( endWrite - leadPointer ) * 1000.0;
6390 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6391 if ( millis < 1.0 ) millis = 1.0;
6392 Sleep( (DWORD) millis );
6393 }
6394
6395 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6396 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6397 // We've strayed into the forbidden zone ... resync the read pointer.
6398 handle->xrun[0] = true;
6399 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6400 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6401 handle->bufferPointer[0] = nextWritePointer;
6402 endWrite = nextWritePointer + bufferBytes;
6403 }
6404
6405 // Lock free space in the buffer
6406 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6407 &bufferSize1, &buffer2, &bufferSize2, 0 );
6408 if ( FAILED( result ) ) {
6409 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6410 errorText_ = errorStream_.str();
6411 MUTEX_UNLOCK( &stream_.mutex );
6412 error( RtAudioError::SYSTEM_ERROR );
6413 return;
6414 }
6415
6416 // Copy our buffer into the DS buffer
6417 CopyMemory( buffer1, buffer, bufferSize1 );
6418 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6419
6420 // Update our buffer offset and unlock sound buffer
6421 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6422 if ( FAILED( result ) ) {
6423 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6424 errorText_ = errorStream_.str();
6425 MUTEX_UNLOCK( &stream_.mutex );
6426 error( RtAudioError::SYSTEM_ERROR );
6427 return;
6428 }
6429 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6430 handle->bufferPointer[0] = nextWritePointer;
6431 }
6432
6433 // Don't bother draining input
6434 if ( handle->drainCounter ) {
6435 handle->drainCounter++;
6436 goto unlock;
6437 }
6438
6439 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6440
6441 // Setup parameters.
6442 if ( stream_.doConvertBuffer[1] ) {
6443 buffer = stream_.deviceBuffer;
6444 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6445 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6446 }
6447 else {
6448 buffer = stream_.userBuffer[1];
6449 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6450 bufferBytes *= formatBytes( stream_.userFormat );
6451 }
6452
6453 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6454 long nextReadPointer = handle->bufferPointer[1];
6455 DWORD dsBufferSize = handle->dsBufferSize[1];
6456
6457 // Find out where the write and "safe read" pointers are.
6458 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6459 if ( FAILED( result ) ) {
6460 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6461 errorText_ = errorStream_.str();
6462 MUTEX_UNLOCK( &stream_.mutex );
6463 error( RtAudioError::SYSTEM_ERROR );
6464 return;
6465 }
6466
6467 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6468 DWORD endRead = nextReadPointer + bufferBytes;
6469
6470 // Handling depends on whether we are INPUT or DUPLEX.
6471 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6472 // then a wait here will drag the write pointers into the forbidden zone.
6473 //
6474 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6475 // it's in a safe position. This causes dropouts, but it seems to be the only
6476 // practical way to sync up the read and write pointers reliably, given the
6477 // the very complex relationship between phase and increment of the read and write
6478 // pointers.
6479 //
6480 // In order to minimize audible dropouts in DUPLEX mode, we will
6481 // provide a pre-roll period of 0.5 seconds in which we return
6482 // zeros from the read buffer while the pointers sync up.
6483
6484 if ( stream_.mode == DUPLEX ) {
6485 if ( safeReadPointer < endRead ) {
6486 if ( duplexPrerollBytes <= 0 ) {
6487 // Pre-roll time over. Be more agressive.
6488 int adjustment = endRead-safeReadPointer;
6489
6490 handle->xrun[1] = true;
6491 // Two cases:
6492 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6493 // and perform fine adjustments later.
6494 // - small adjustments: back off by twice as much.
6495 if ( adjustment >= 2*bufferBytes )
6496 nextReadPointer = safeReadPointer-2*bufferBytes;
6497 else
6498 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6499
6500 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6501
6502 }
6503 else {
6504 // In pre=roll time. Just do it.
6505 nextReadPointer = safeReadPointer - bufferBytes;
6506 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6507 }
6508 endRead = nextReadPointer + bufferBytes;
6509 }
6510 }
6511 else { // mode == INPUT
6512 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6513 // See comments for playback.
6514 double millis = (endRead - safeReadPointer) * 1000.0;
6515 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6516 if ( millis < 1.0 ) millis = 1.0;
6517 Sleep( (DWORD) millis );
6518
6519 // Wake up and find out where we are now.
6520 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6521 if ( FAILED( result ) ) {
6522 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6523 errorText_ = errorStream_.str();
6524 MUTEX_UNLOCK( &stream_.mutex );
6525 error( RtAudioError::SYSTEM_ERROR );
6526 return;
6527 }
6528
6529 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6530 }
6531 }
6532
6533 // Lock free space in the buffer
6534 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6535 &bufferSize1, &buffer2, &bufferSize2, 0 );
6536 if ( FAILED( result ) ) {
6537 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6538 errorText_ = errorStream_.str();
6539 MUTEX_UNLOCK( &stream_.mutex );
6540 error( RtAudioError::SYSTEM_ERROR );
6541 return;
6542 }
6543
6544 if ( duplexPrerollBytes <= 0 ) {
6545 // Copy our buffer into the DS buffer
6546 CopyMemory( buffer, buffer1, bufferSize1 );
6547 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6548 }
6549 else {
6550 memset( buffer, 0, bufferSize1 );
6551 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6552 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6553 }
6554
6555 // Update our buffer offset and unlock sound buffer
6556 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6557 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6558 if ( FAILED( result ) ) {
6559 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6560 errorText_ = errorStream_.str();
6561 MUTEX_UNLOCK( &stream_.mutex );
6562 error( RtAudioError::SYSTEM_ERROR );
6563 return;
6564 }
6565 handle->bufferPointer[1] = nextReadPointer;
6566
6567 // No byte swapping necessary in DirectSound implementation.
6568
6569 // If necessary, convert 8-bit data from unsigned to signed.
6570 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6571 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6572
6573 // Do buffer conversion if necessary.
6574 if ( stream_.doConvertBuffer[1] )
6575 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6576 }
6577
6578 unlock:
6579 MUTEX_UNLOCK( &stream_.mutex );
6580 RtApi::tickStreamTime();
6581 }
6582
6583 // Definitions for utility functions and callbacks
6584 // specific to the DirectSound implementation.
6585
callbackHandler(void * ptr)6586 static unsigned __stdcall callbackHandler( void *ptr )
6587 {
6588 CallbackInfo *info = (CallbackInfo *) ptr;
6589 RtApiDs *object = (RtApiDs *) info->object;
6590 bool* isRunning = &info->isRunning;
6591
6592 while ( *isRunning == true ) {
6593 object->callbackEvent();
6594 }
6595
6596 _endthreadex( 0 );
6597 return 0;
6598 }
6599
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6600 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6601 LPCTSTR description,
6602 LPCTSTR /*module*/,
6603 LPVOID lpContext )
6604 {
6605 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6606 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6607
6608 HRESULT hr;
6609 bool validDevice = false;
6610 if ( probeInfo.isInput == true ) {
6611 DSCCAPS caps;
6612 LPDIRECTSOUNDCAPTURE object;
6613
6614 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6615 if ( hr != DS_OK ) return TRUE;
6616
6617 caps.dwSize = sizeof(caps);
6618 hr = object->GetCaps( &caps );
6619 if ( hr == DS_OK ) {
6620 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6621 validDevice = true;
6622 }
6623 object->Release();
6624 }
6625 else {
6626 DSCAPS caps;
6627 LPDIRECTSOUND object;
6628 hr = DirectSoundCreate( lpguid, &object, NULL );
6629 if ( hr != DS_OK ) return TRUE;
6630
6631 caps.dwSize = sizeof(caps);
6632 hr = object->GetCaps( &caps );
6633 if ( hr == DS_OK ) {
6634 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6635 validDevice = true;
6636 }
6637 object->Release();
6638 }
6639
6640 // If good device, then save its name and guid.
6641 std::string name = convertCharPointerToStdString( description );
6642 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6643 if ( lpguid == NULL )
6644 name = "Default Device";
6645 if ( validDevice ) {
6646 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6647 if ( dsDevices[i].name == name ) {
6648 dsDevices[i].found = true;
6649 if ( probeInfo.isInput ) {
6650 dsDevices[i].id[1] = lpguid;
6651 dsDevices[i].validId[1] = true;
6652 }
6653 else {
6654 dsDevices[i].id[0] = lpguid;
6655 dsDevices[i].validId[0] = true;
6656 }
6657 return TRUE;
6658 }
6659 }
6660
6661 DsDevice device;
6662 device.name = name;
6663 device.found = true;
6664 if ( probeInfo.isInput ) {
6665 device.id[1] = lpguid;
6666 device.validId[1] = true;
6667 }
6668 else {
6669 device.id[0] = lpguid;
6670 device.validId[0] = true;
6671 }
6672 dsDevices.push_back( device );
6673 }
6674
6675 return TRUE;
6676 }
6677
getErrorString(int code)6678 static const char* getErrorString( int code )
6679 {
6680 switch ( code ) {
6681
6682 case DSERR_ALLOCATED:
6683 return "Already allocated";
6684
6685 case DSERR_CONTROLUNAVAIL:
6686 return "Control unavailable";
6687
6688 case DSERR_INVALIDPARAM:
6689 return "Invalid parameter";
6690
6691 case DSERR_INVALIDCALL:
6692 return "Invalid call";
6693
6694 case DSERR_GENERIC:
6695 return "Generic error";
6696
6697 case DSERR_PRIOLEVELNEEDED:
6698 return "Priority level needed";
6699
6700 case DSERR_OUTOFMEMORY:
6701 return "Out of memory";
6702
6703 case DSERR_BADFORMAT:
6704 return "The sample rate or the channel format is not supported";
6705
6706 case DSERR_UNSUPPORTED:
6707 return "Not supported";
6708
6709 case DSERR_NODRIVER:
6710 return "No driver";
6711
6712 case DSERR_ALREADYINITIALIZED:
6713 return "Already initialized";
6714
6715 case DSERR_NOAGGREGATION:
6716 return "No aggregation";
6717
6718 case DSERR_BUFFERLOST:
6719 return "Buffer lost";
6720
6721 case DSERR_OTHERAPPHASPRIO:
6722 return "Another application already has priority";
6723
6724 case DSERR_UNINITIALIZED:
6725 return "Uninitialized";
6726
6727 default:
6728 return "DirectSound unknown error";
6729 }
6730 }
6731 //******************** End of __WINDOWS_DS__ *********************//
6732 #endif
6733
6734
6735 #if defined(__LINUX_ALSA__)
6736
6737 #include <alsa/asoundlib.h>
6738 #include <unistd.h>
6739
6740 // A structure to hold various information related to the ALSA API
6741 // implementation.
6742 struct AlsaHandle {
6743 snd_pcm_t *handles[2];
6744 bool synchronized;
6745 bool xrun[2];
6746 pthread_cond_t runnable_cv;
6747 bool runnable;
6748
AlsaHandleAlsaHandle6749 AlsaHandle()
6750 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6751 };
6752
6753 static void *alsaCallbackHandler( void * ptr );
6754
// Default constructor: all stream state is initialized by the RtApi
// base class, so there is nothing ALSA-specific to set up here.
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
6759
~RtApiAlsa()6760 RtApiAlsa :: ~RtApiAlsa()
6761 {
6762 if ( stream_.state != STREAM_CLOSED ) closeStream();
6763 }
6764
getDeviceCount(void)6765 unsigned int RtApiAlsa :: getDeviceCount( void )
6766 {
6767 unsigned nDevices = 0;
6768 int result, subdevice, card;
6769 char name[64];
6770 snd_ctl_t *handle;
6771
6772 // Count cards and devices
6773 card = -1;
6774 snd_card_next( &card );
6775 while ( card >= 0 ) {
6776 sprintf( name, "hw:%d", card );
6777 result = snd_ctl_open( &handle, name, 0 );
6778 if ( result < 0 ) {
6779 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6780 errorText_ = errorStream_.str();
6781 error( RtAudioError::WARNING );
6782 goto nextcard;
6783 }
6784 subdevice = -1;
6785 while( 1 ) {
6786 result = snd_ctl_pcm_next_device( handle, &subdevice );
6787 if ( result < 0 ) {
6788 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6789 errorText_ = errorStream_.str();
6790 error( RtAudioError::WARNING );
6791 break;
6792 }
6793 if ( subdevice < 0 )
6794 break;
6795 nDevices++;
6796 }
6797 nextcard:
6798 snd_ctl_close( handle );
6799 snd_card_next( &card );
6800 }
6801
6802 result = snd_ctl_open( &handle, "default", 0 );
6803 if (result == 0) {
6804 nDevices++;
6805 snd_ctl_close( handle );
6806 }
6807
6808 return nDevices;
6809 }
6810
getDeviceInfo(unsigned int device)6811 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6812 {
6813 RtAudio::DeviceInfo info;
6814 info.probed = false;
6815
6816 unsigned nDevices = 0;
6817 int result, subdevice, card;
6818 char name[64];
6819 snd_ctl_t *chandle;
6820
6821 // Count cards and devices
6822 card = -1;
6823 subdevice = -1;
6824 snd_card_next( &card );
6825 while ( card >= 0 ) {
6826 sprintf( name, "hw:%d", card );
6827 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6828 if ( result < 0 ) {
6829 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6830 errorText_ = errorStream_.str();
6831 error( RtAudioError::WARNING );
6832 goto nextcard;
6833 }
6834 subdevice = -1;
6835 while( 1 ) {
6836 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6837 if ( result < 0 ) {
6838 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6839 errorText_ = errorStream_.str();
6840 error( RtAudioError::WARNING );
6841 break;
6842 }
6843 if ( subdevice < 0 ) break;
6844 if ( nDevices == device ) {
6845 sprintf( name, "hw:%d,%d", card, subdevice );
6846 goto foundDevice;
6847 }
6848 nDevices++;
6849 }
6850 nextcard:
6851 snd_ctl_close( chandle );
6852 snd_card_next( &card );
6853 }
6854
6855 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6856 if ( result == 0 ) {
6857 if ( nDevices == device ) {
6858 strcpy( name, "default" );
6859 goto foundDevice;
6860 }
6861 nDevices++;
6862 }
6863
6864 if ( nDevices == 0 ) {
6865 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6866 error( RtAudioError::INVALID_USE );
6867 return info;
6868 }
6869
6870 if ( device >= nDevices ) {
6871 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6872 error( RtAudioError::INVALID_USE );
6873 return info;
6874 }
6875
6876 foundDevice:
6877
6878 // If a stream is already open, we cannot probe the stream devices.
6879 // Thus, use the saved results.
6880 if ( stream_.state != STREAM_CLOSED &&
6881 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6882 snd_ctl_close( chandle );
6883 if ( device >= devices_.size() ) {
6884 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6885 error( RtAudioError::WARNING );
6886 return info;
6887 }
6888 return devices_[ device ];
6889 }
6890
6891 int openMode = SND_PCM_ASYNC;
6892 snd_pcm_stream_t stream;
6893 snd_pcm_info_t *pcminfo;
6894 snd_pcm_info_alloca( &pcminfo );
6895 snd_pcm_t *phandle;
6896 snd_pcm_hw_params_t *params;
6897 snd_pcm_hw_params_alloca( ¶ms );
6898
6899 // First try for playback unless default device (which has subdev -1)
6900 stream = SND_PCM_STREAM_PLAYBACK;
6901 snd_pcm_info_set_stream( pcminfo, stream );
6902 if ( subdevice != -1 ) {
6903 snd_pcm_info_set_device( pcminfo, subdevice );
6904 snd_pcm_info_set_subdevice( pcminfo, 0 );
6905
6906 result = snd_ctl_pcm_info( chandle, pcminfo );
6907 if ( result < 0 ) {
6908 // Device probably doesn't support playback.
6909 goto captureProbe;
6910 }
6911 }
6912
6913 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6914 if ( result < 0 ) {
6915 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6916 errorText_ = errorStream_.str();
6917 error( RtAudioError::WARNING );
6918 goto captureProbe;
6919 }
6920
6921 // The device is open ... fill the parameter structure.
6922 result = snd_pcm_hw_params_any( phandle, params );
6923 if ( result < 0 ) {
6924 snd_pcm_close( phandle );
6925 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6926 errorText_ = errorStream_.str();
6927 error( RtAudioError::WARNING );
6928 goto captureProbe;
6929 }
6930
6931 // Get output channel information.
6932 unsigned int value;
6933 result = snd_pcm_hw_params_get_channels_max( params, &value );
6934 if ( result < 0 ) {
6935 snd_pcm_close( phandle );
6936 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6937 errorText_ = errorStream_.str();
6938 error( RtAudioError::WARNING );
6939 goto captureProbe;
6940 }
6941 info.outputChannels = value;
6942 snd_pcm_close( phandle );
6943
6944 captureProbe:
6945 stream = SND_PCM_STREAM_CAPTURE;
6946 snd_pcm_info_set_stream( pcminfo, stream );
6947
6948 // Now try for capture unless default device (with subdev = -1)
6949 if ( subdevice != -1 ) {
6950 result = snd_ctl_pcm_info( chandle, pcminfo );
6951 snd_ctl_close( chandle );
6952 if ( result < 0 ) {
6953 // Device probably doesn't support capture.
6954 if ( info.outputChannels == 0 ) return info;
6955 goto probeParameters;
6956 }
6957 }
6958 else
6959 snd_ctl_close( chandle );
6960
6961 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6962 if ( result < 0 ) {
6963 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6964 errorText_ = errorStream_.str();
6965 error( RtAudioError::WARNING );
6966 if ( info.outputChannels == 0 ) return info;
6967 goto probeParameters;
6968 }
6969
6970 // The device is open ... fill the parameter structure.
6971 result = snd_pcm_hw_params_any( phandle, params );
6972 if ( result < 0 ) {
6973 snd_pcm_close( phandle );
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6975 errorText_ = errorStream_.str();
6976 error( RtAudioError::WARNING );
6977 if ( info.outputChannels == 0 ) return info;
6978 goto probeParameters;
6979 }
6980
6981 result = snd_pcm_hw_params_get_channels_max( params, &value );
6982 if ( result < 0 ) {
6983 snd_pcm_close( phandle );
6984 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6985 errorText_ = errorStream_.str();
6986 error( RtAudioError::WARNING );
6987 if ( info.outputChannels == 0 ) return info;
6988 goto probeParameters;
6989 }
6990 info.inputChannels = value;
6991 snd_pcm_close( phandle );
6992
6993 // If device opens for both playback and capture, we determine the channels.
6994 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6995 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
6996
6997 // ALSA doesn't provide default devices so we'll use the first available one.
6998 if ( device == 0 && info.outputChannels > 0 )
6999 info.isDefaultOutput = true;
7000 if ( device == 0 && info.inputChannels > 0 )
7001 info.isDefaultInput = true;
7002
7003 probeParameters:
7004 // At this point, we just need to figure out the supported data
7005 // formats and sample rates. We'll proceed by opening the device in
7006 // the direction with the maximum number of channels, or playback if
7007 // they are equal. This might limit our sample rate options, but so
7008 // be it.
7009
7010 if ( info.outputChannels >= info.inputChannels )
7011 stream = SND_PCM_STREAM_PLAYBACK;
7012 else
7013 stream = SND_PCM_STREAM_CAPTURE;
7014 snd_pcm_info_set_stream( pcminfo, stream );
7015
7016 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7017 if ( result < 0 ) {
7018 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7019 errorText_ = errorStream_.str();
7020 error( RtAudioError::WARNING );
7021 return info;
7022 }
7023
7024 // The device is open ... fill the parameter structure.
7025 result = snd_pcm_hw_params_any( phandle, params );
7026 if ( result < 0 ) {
7027 snd_pcm_close( phandle );
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7029 errorText_ = errorStream_.str();
7030 error( RtAudioError::WARNING );
7031 return info;
7032 }
7033
7034 // Test our discrete set of sample rate values.
7035 info.sampleRates.clear();
7036 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7037 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7038 info.sampleRates.push_back( SAMPLE_RATES[i] );
7039
7040 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7041 info.preferredSampleRate = SAMPLE_RATES[i];
7042 }
7043 }
7044 if ( info.sampleRates.size() == 0 ) {
7045 snd_pcm_close( phandle );
7046 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7047 errorText_ = errorStream_.str();
7048 error( RtAudioError::WARNING );
7049 return info;
7050 }
7051
7052 // Probe the supported data formats ... we don't care about endian-ness just yet
7053 snd_pcm_format_t format;
7054 info.nativeFormats = 0;
7055 format = SND_PCM_FORMAT_S8;
7056 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7057 info.nativeFormats |= RTAUDIO_SINT8;
7058 format = SND_PCM_FORMAT_S16;
7059 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7060 info.nativeFormats |= RTAUDIO_SINT16;
7061 format = SND_PCM_FORMAT_S24;
7062 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7063 info.nativeFormats |= RTAUDIO_SINT24;
7064 format = SND_PCM_FORMAT_S32;
7065 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7066 info.nativeFormats |= RTAUDIO_SINT32;
7067 format = SND_PCM_FORMAT_FLOAT;
7068 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7069 info.nativeFormats |= RTAUDIO_FLOAT32;
7070 format = SND_PCM_FORMAT_FLOAT64;
7071 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7072 info.nativeFormats |= RTAUDIO_FLOAT64;
7073
7074 // Check that we have at least one supported format
7075 if ( info.nativeFormats == 0 ) {
7076 snd_pcm_close( phandle );
7077 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7078 errorText_ = errorStream_.str();
7079 error( RtAudioError::WARNING );
7080 return info;
7081 }
7082
7083 // Get the device name
7084 char *cardname;
7085 result = snd_card_get_name( card, &cardname );
7086 if ( result >= 0 ) {
7087 sprintf( name, "hw:%s,%d", cardname, subdevice );
7088 free( cardname );
7089 }
7090 info.name = name;
7091
7092 // That's all ... close the device and return
7093 snd_pcm_close( phandle );
7094 info.probed = true;
7095 return info;
7096 }
7097
saveDeviceInfo(void)7098 void RtApiAlsa :: saveDeviceInfo( void )
7099 {
7100 devices_.clear();
7101
7102 unsigned int nDevices = getDeviceCount();
7103 devices_.resize( nDevices );
7104 for ( unsigned int i=0; i<nDevices; i++ )
7105 devices_[i] = getDeviceInfo( i );
7106 }
7107
// Open and configure one direction (OUTPUT or INPUT) of a stream on
// the given device index.  Fills in the relevant stream_ fields,
// allocates conversion buffers as needed, and (for the first
// direction opened) spawns the callback thread.  Returns SUCCESS or
// FAILURE; on failure all partially-acquired resources are released.
// NOTE: the ordering of the ALSA hw/sw parameter calls below is
// significant -- do not reorder.
bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )

{
#if defined(__RTAUDIO_DEBUG__)
  snd_output_t *out;
  snd_output_stdio_attach(&out, stderr, 0);
#endif

  // I'm not using the "plug" interface ... too much inconsistent behavior.

  unsigned nDevices = 0;
  int result, subdevice, card;
  char name[64];
  snd_ctl_t *chandle;

  // Resolve the RtAudio device index to an ALSA device name, unless
  // the caller explicitly asked for the "default" device.
  if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
    snprintf(name, sizeof(name), "%s", "default");
  else {
    // Count cards and devices
    card = -1;
    snd_card_next( &card );
    while ( card >= 0 ) {
      sprintf( name, "hw:%d", card );
      result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
      subdevice = -1;
      while( 1 ) {
        result = snd_ctl_pcm_next_device( chandle, &subdevice );
        if ( result < 0 ) break;
        if ( subdevice < 0 ) break;
        if ( nDevices == device ) {
          sprintf( name, "hw:%d,%d", card, subdevice );
          snd_ctl_close( chandle );
          goto foundDevice;
        }
        nDevices++;
      }
      snd_ctl_close( chandle );
      snd_card_next( &card );
    }

    // The "default" device occupies the last index.
    result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
    if ( result == 0 ) {
      if ( nDevices == device ) {
        strcpy( name, "default" );
        goto foundDevice;
      }
      nDevices++;
    }

    if ( nDevices == 0 ) {
      // This should not happen because a check is made before this function is called.
      errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
      return FAILURE;
    }

    if ( device >= nDevices ) {
      // This should not happen because a check is made before this function is called.
      errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
      return FAILURE;
    }
  }

 foundDevice:

  // The getDeviceInfo() function will not work for a device that is
  // already open.  Thus, we'll probe the system before opening a
  // stream and save the results for use by getDeviceInfo().
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
    this->saveDeviceInfo();

  snd_pcm_stream_t stream;
  if ( mode == OUTPUT )
    stream = SND_PCM_STREAM_PLAYBACK;
  else
    stream = SND_PCM_STREAM_CAPTURE;

  snd_pcm_t *phandle;
  int openMode = SND_PCM_ASYNC;
  result = snd_pcm_open( &phandle, name, stream, openMode );
  if ( result < 0 ) {
    if ( mode == OUTPUT )
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
    else
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Fill the parameter structure.
  snd_pcm_hw_params_t *hw_params;
  snd_pcm_hw_params_alloca( &hw_params );
  result = snd_pcm_hw_params_any( phandle, hw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
  snd_pcm_hw_params_dump( hw_params, out );
#endif

  // Set access ... check user preference.  If the preferred access
  // mode is rejected, fall back to the other one and note the device
  // layout so buffer conversion can compensate.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
    stream_.userInterleaved = false;
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
    if ( result < 0 ) {
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
      stream_.deviceInterleaved[mode] =  true;
    }
    else
      stream_.deviceInterleaved[mode] = false;
  }
  else {
    stream_.userInterleaved = true;
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
    if ( result < 0 ) {
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
      stream_.deviceInterleaved[mode] =  false;
    }
    else
      stream_.deviceInterleaved[mode] =  true;
  }

  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine how to set the device format.
  stream_.userFormat = format;
  snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;

  if ( format == RTAUDIO_SINT8 )
    deviceFormat = SND_PCM_FORMAT_S8;
  else if ( format == RTAUDIO_SINT16 )
    deviceFormat = SND_PCM_FORMAT_S16;
  else if ( format == RTAUDIO_SINT24 )
    deviceFormat = SND_PCM_FORMAT_S24;
  else if ( format == RTAUDIO_SINT32 )
    deviceFormat = SND_PCM_FORMAT_S32;
  else if ( format == RTAUDIO_FLOAT32 )
    deviceFormat = SND_PCM_FORMAT_FLOAT;
  else if ( format == RTAUDIO_FLOAT64 )
    deviceFormat = SND_PCM_FORMAT_FLOAT64;

  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
    stream_.deviceFormat[mode] = format;
    goto setFormat;
  }

  // The user requested format is not natively supported by the device.
  // Fall back through the native formats from widest to narrowest;
  // the conversion layer will translate to/from the user format.
  deviceFormat = SND_PCM_FORMAT_FLOAT64;
  if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_FLOAT;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S32;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S24;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S16;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S8;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    goto setFormat;
  }

  // If we get here, no supported format was found.
  snd_pcm_close( phandle );
  errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
  errorText_ = errorStream_.str();
  return FAILURE;

 setFormat:
  result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine whether byte-swaping is necessary.
  stream_.doByteSwap[mode] = false;
  if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
    result = snd_pcm_format_cpu_endian( deviceFormat );
    if ( result == 0 )  // 0 == device format is opposite of CPU endianness
      stream_.doByteSwap[mode] = true;
    else if (result < 0) {
      snd_pcm_close( phandle );
      errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Set the sample rate.  set_rate_near may adjust sampleRate to the
  // closest rate the device supports.
  result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine the number of channels for this device.  We support a possible
  // minimum device channel number > than the value requested by the user.
  stream_.nUserChannels[mode] = channels;
  unsigned int value;
  result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
  unsigned int deviceChannels = value;
  if ( result < 0 || deviceChannels < channels + firstChannel ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  deviceChannels = value;
  // Open at least enough channels to cover the requested range.
  if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
  stream_.nDeviceChannels[mode] = deviceChannels;

  // Set the device channels.
  result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the buffer (or period) size.  set_period_size_near may adjust
  // periodSize, so the caller's bufferSize is updated afterwards.
  int dir = 0;
  snd_pcm_uframes_t periodSize = *bufferSize;
  result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  *bufferSize = periodSize;

  // Set the buffer number, which in ALSA is referred to as the "period".
  unsigned int periods = 0;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
  if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
  if ( periods < 2 ) periods = 4; // a fairly safe default value
  result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  stream_.bufferSize = *bufferSize;

  // Install the hardware configuration
  result = snd_pcm_hw_params( phandle, hw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
  snd_pcm_hw_params_dump( hw_params, out );
#endif

  // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
  snd_pcm_sw_params_t *sw_params = NULL;
  snd_pcm_sw_params_alloca( &sw_params );
  snd_pcm_sw_params_current( phandle, sw_params );
  snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
  snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
  snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );

  // The following two settings were suggested by Theo Veenker
  //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
  //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );

  // here are two options for a fix
  //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
  snd_pcm_uframes_t val;
  snd_pcm_sw_params_get_boundary( sw_params, &val );
  snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );

  result = snd_pcm_sw_params( phandle, sw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
  snd_pcm_sw_params_dump( sw_params, out );
#endif

  // Set flags for buffer conversion: conversion is needed whenever the
  // user-visible format/channel-count/layout differs from the device's.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate the ApiHandle if necessary and then save.
  AlsaHandle *apiInfo = 0;
  if ( stream_.apiHandle == 0 ) {
    try {
      apiInfo = (AlsaHandle *) new AlsaHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
      goto error;
    }

    if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
      errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }

    stream_.apiHandle = (void *) apiInfo;
    apiInfo->handles[0] = 0;
    apiInfo->handles[1] = 0;
  }
  else {
    apiInfo = (AlsaHandle *) stream_.apiHandle;
  }
  // Ownership of the pcm handle transfers to apiInfo from here on;
  // clear phandle so the error path doesn't double-close it.
  apiInfo->handles[mode] = phandle;
  phandle = 0;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // In duplex mode the single device buffer is shared; only grow it
      // if the input side needs more bytes than the output side did.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.sampleRate = sampleRate;
  stream_.nBuffers = periods;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup thread if necessary.
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    // Link the streams if possible.
    apiInfo->synchronized = false;
    if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
      apiInfo->synchronized = true;
    else {
      errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
      error( RtAudioError::WARNING );
    }
  }
  else {
    stream_.mode = mode;

    // Setup callback thread.
    stream_.callbackInfo.object = (void *) this;

    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional).  The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      // We previously attempted to increase the audio callback priority
      // to SCHED_RR here via the attributes.  However, while no errors
      // were reported in doing so, it did not work.  So, now this is
      // done in the alsaCallbackHandler function.
      stream_.callbackInfo.doRealtime = true;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      stream_.callbackInfo.priority = priority;
    }
#endif

    stream_.callbackInfo.isRunning = true;
    result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
    pthread_attr_destroy( &attr );
    if ( result ) {
      stream_.callbackInfo.isRunning = false;
      errorText_ = "RtApiAlsa::error creating callback thread!";
      goto error;
    }
  }

  return SUCCESS;

 error:
  // Cleanup: release the api handle, any open pcm handles, and all
  // buffers allocated above, then mark the stream closed.
  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  if ( phandle) snd_pcm_close( phandle );

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
7618
// Stop the callback thread, drop any in-flight audio, close the pcm
// handles, and release all stream resources.  Safe to call only on an
// open stream; warns otherwise.
void RtApiAlsa :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // Tell the callback thread to exit, then wake it in case it is
  // blocked on the runnable condition (stream stopped).
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  // Wait for the callback thread to finish before tearing anything down.
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    // snd_pcm_drop discards pending frames immediately (no drain).
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );
  }

  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Free the user-side and device-side conversion buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
7668
startStream()7669 void RtApiAlsa :: startStream()
7670 {
7671 // This method calls snd_pcm_prepare if the device isn't already in that state.
7672
7673 verifyStream();
7674 if ( stream_.state == STREAM_RUNNING ) {
7675 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7676 error( RtAudioError::WARNING );
7677 return;
7678 }
7679
7680 MUTEX_LOCK( &stream_.mutex );
7681
7682 int result = 0;
7683 snd_pcm_state_t state;
7684 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7685 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7686 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7687 state = snd_pcm_state( handle[0] );
7688 if ( state != SND_PCM_STATE_PREPARED ) {
7689 result = snd_pcm_prepare( handle[0] );
7690 if ( result < 0 ) {
7691 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7692 errorText_ = errorStream_.str();
7693 goto unlock;
7694 }
7695 }
7696 }
7697
7698 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7699 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7700 state = snd_pcm_state( handle[1] );
7701 if ( state != SND_PCM_STATE_PREPARED ) {
7702 result = snd_pcm_prepare( handle[1] );
7703 if ( result < 0 ) {
7704 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7705 errorText_ = errorStream_.str();
7706 goto unlock;
7707 }
7708 }
7709 }
7710
7711 stream_.state = STREAM_RUNNING;
7712
7713 unlock:
7714 apiInfo->runnable = true;
7715 pthread_cond_signal( &apiInfo->runnable_cv );
7716 MUTEX_UNLOCK( &stream_.mutex );
7717
7718 if ( result >= 0 ) return;
7719 error( RtAudioError::SYSTEM_ERROR );
7720 }
7721
void RtApiAlsa :: stopStream()
{
  // Stop a running (or callback-signalled) stream.  Output is drained
  // (buffered audio plays out) unless the handles are synchronized, in
  // which case it is dropped; input is always dropped.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the mutex so the callback thread sees
  // STREAM_STOPPED and parks itself on its next pass.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( apiInfo->synchronized )
      result = snd_pcm_drop( handle[0] );  // discard pending frames immediately
    else
      result = snd_pcm_drain( handle[0] ); // play out whatever is still buffered
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Input is only dropped explicitly when not synchronized -- presumably
  // the linked output drop above already stops the capture side; confirm
  // against where AlsaHandle::synchronized is set.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  // Any negative ALSA result above is promoted to a SYSTEM_ERROR here.
  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7765
void RtApiAlsa :: abortStream()
{
  // Abort a running stream: unlike stopStream(), pending output audio is
  // dropped (discarded) rather than drained.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the mutex so the callback thread sees
  // STREAM_STOPPED and parks itself on its next pass.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = snd_pcm_drop( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Input is only dropped explicitly when not synchronized -- presumably
  // the linked output drop above already stops the capture side; confirm
  // against where AlsaHandle::synchronized is set.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  // Any negative ALSA result above is promoted to a SYSTEM_ERROR here.
  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7806
void RtApiAlsa :: callbackEvent()
{
  // One iteration of the stream loop: wait while stopped, run the user
  // callback, then read input from and/or write output to the ALSA
  // device, handling xruns by re-preparing the PCM.
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream() (or
    // closeStream()) flags the handle runnable again.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report (and clear) any xrun flagged since the last callback via the
  // status bits passed to the user callback.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // Callback return of 2 requests an immediate abort (pending audio is
  // dropped); 1 requests a stop after this buffer (handled at the end).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: capture into the device buffer when a format or
    // channel conversion is needed, otherwise straight into the user's.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build per-channel pointers into the single
      // contiguous buffer (one bufferSize-sized plane per channel).
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the
          // device so capture can continue.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto tryOutput; // input failed, but still service the output side
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: per-channel planes, as on the input side.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it, then re-prepare so playback can resume.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
7994
static void *alsaCallbackHandler( void *ptr )
{
  // Thread entry point for the ALSA stream: repeatedly fires the
  // callback event until closeStream() clears CallbackInfo::isRunning.
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAlsa *object = (RtApiAlsa *) info->object;
  bool *isRunning = &info->isRunning;

#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
  if ( info->doRealtime ) {
    // Best-effort request for round-robin realtime scheduling; the
    // return value is ignored, so on failure the thread simply keeps
    // the default policy.
    pthread_t tID = pthread_self();	 // ID of this thread
    sched_param prio = { info->priority }; // scheduling priority of thread
    pthread_setschedparam( tID, SCHED_RR, &prio );
  }
#endif

  while ( *isRunning == true ) {
    pthread_testcancel(); // honor a pending pthread_cancel request, if any
    object->callbackEvent();
  }

  pthread_exit( NULL );
}
8016
8017 //******************** End of __LINUX_ALSA__ *********************//
8018 #endif
8019
8020 #if defined(__LINUX_PULSE__)
8021
8022 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8023 // and Tristan Matthews.
8024
8025 #include <pulse/error.h>
8026 #include <pulse/simple.h>
8027 #include <cstdio>
8028
// Sample rates advertised by the PulseAudio backend; the array is
// zero-terminated so callers can iterate without an explicit count.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8031
// Pairs an RtAudio sample format with its native PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};
8036
// Formats passed through natively, terminated by a {0, PA_SAMPLE_INVALID}
// sentinel.  Formats not listed here are converted to/from float32
// internally (see RtApiPulse::probeDeviceOpen).
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8042
// Per-stream state for the PulseAudio backend: one simple-API connection
// per direction plus the callback thread and its start/stop handshake.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection; 0 until OUTPUT is opened
  pa_simple *s_rec;           // capture connection; 0 until INPUT is opened
  pthread_t thread;           // callback thread; valid only after pthread_create in probeDeviceOpen
  pthread_cond_t runnable_cv; // initialized in probeDeviceOpen, destroyed in closeStream
  bool runnable;              // start/stop flag; accessed under stream_.mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8051
~RtApiPulse()8052 RtApiPulse::~RtApiPulse()
8053 {
8054 if ( stream_.state != STREAM_CLOSED )
8055 closeStream();
8056 }
8057
// The PulseAudio backend exposes exactly one (virtual) device -- the
// server connection -- so the count is a constant.
unsigned int RtApiPulse::getDeviceCount( void )
{
  return 1;
}
8062
getDeviceInfo(unsigned int)8063 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8064 {
8065 RtAudio::DeviceInfo info;
8066 info.probed = true;
8067 info.name = "PulseAudio";
8068 info.outputChannels = 2;
8069 info.inputChannels = 2;
8070 info.duplexChannels = 2;
8071 info.isDefaultOutput = true;
8072 info.isDefaultInput = true;
8073
8074 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8075 info.sampleRates.push_back( *sr );
8076
8077 info.preferredSampleRate = 48000;
8078 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8079
8080 return info;
8081 }
8082
pulseaudio_callback(void * user)8083 static void *pulseaudio_callback( void * user )
8084 {
8085 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8086 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8087 volatile bool *isRunning = &cbi->isRunning;
8088
8089 while ( *isRunning ) {
8090 pthread_testcancel();
8091 context->callbackEvent();
8092 }
8093
8094 pthread_exit( NULL );
8095 }
8096
void RtApiPulse::closeStream( void )
{
  // Tear down the stream: stop and join the callback thread, close the
  // PulseAudio connections and free the user buffers.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  stream_.callbackInfo.isRunning = false;
  if ( pah ) {
    // If the callback thread is parked in the stopped state, wake it so
    // it can observe isRunning == false and exit; otherwise the join
    // below would deadlock.
    MUTEX_LOCK( &stream_.mutex );
    if ( stream_.state == STREAM_STOPPED ) {
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    if ( pah->s_play ) {
      // Discard any pending playback data before closing the connection.
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
8135
void RtApiPulse::callbackEvent( void )
{
  // One pass of the stream loop: wait while stopped, invoke the user
  // callback, then push output to / pull input from the PulseAudio
  // simple-API connections (their blocking I/O paces the stream).
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Park until startStream() or closeStream() signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // Callback return of 2 requests an immediate abort (output flushed);
  // 1 requests a stop after this buffer (handled at the bottom).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // When converting, audio moves through deviceBuffer; otherwise it goes
  // directly between the server and the user buffers.  These are
  // declared (and initialized) before the state check because the goto
  // below may not jump over an initialized declaration.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write; a failure is reported as a warning only.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read; a failure is reported as a warning only.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  if ( doStopStream == 1 )
    stopStream();
}
8227
void RtApiPulse::startStream( void )
{
  // Release the callback thread from its stopped-state wait by marking
  // the stream running and signalling the runnable condition variable.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::startStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiPulse::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  stream_.state = STREAM_RUNNING;

  // Wake pulseaudio_callback(), which waits on runnable_cv while stopped.
  pah->runnable = true;
  pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );
}
8251
stopStream(void)8252 void RtApiPulse::stopStream( void )
8253 {
8254 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8255
8256 if ( stream_.state == STREAM_CLOSED ) {
8257 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8258 error( RtAudioError::INVALID_USE );
8259 return;
8260 }
8261 if ( stream_.state == STREAM_STOPPED ) {
8262 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8263 error( RtAudioError::WARNING );
8264 return;
8265 }
8266
8267 stream_.state = STREAM_STOPPED;
8268 MUTEX_LOCK( &stream_.mutex );
8269
8270 if ( pah && pah->s_play ) {
8271 int pa_error;
8272 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8273 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8274 pa_strerror( pa_error ) << ".";
8275 errorText_ = errorStream_.str();
8276 MUTEX_UNLOCK( &stream_.mutex );
8277 error( RtAudioError::SYSTEM_ERROR );
8278 return;
8279 }
8280 }
8281
8282 stream_.state = STREAM_STOPPED;
8283 MUTEX_UNLOCK( &stream_.mutex );
8284 }
8285
abortStream(void)8286 void RtApiPulse::abortStream( void )
8287 {
8288 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8289
8290 if ( stream_.state == STREAM_CLOSED ) {
8291 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8292 error( RtAudioError::INVALID_USE );
8293 return;
8294 }
8295 if ( stream_.state == STREAM_STOPPED ) {
8296 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8297 error( RtAudioError::WARNING );
8298 return;
8299 }
8300
8301 stream_.state = STREAM_STOPPED;
8302 MUTEX_LOCK( &stream_.mutex );
8303
8304 if ( pah && pah->s_play ) {
8305 int pa_error;
8306 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8307 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8308 pa_strerror( pa_error ) << ".";
8309 errorText_ = errorStream_.str();
8310 MUTEX_UNLOCK( &stream_.mutex );
8311 error( RtAudioError::SYSTEM_ERROR );
8312 return;
8313 }
8314 }
8315
8316 stream_.state = STREAM_STOPPED;
8317 MUTEX_UNLOCK( &stream_.mutex );
8318 }
8319
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
{
  // Open one direction (INPUT or OUTPUT) of the single PulseAudio
  // device, allocating buffers and -- on the first call -- the shared
  // handle and callback thread.  Called once per direction for duplex
  // streams.  Returns true on success, FAILURE otherwise.
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  // Only device 0 exists; mono/stereo with first channel 0 only.
  if ( device != 0 ) return false;
  if ( mode != INPUT && mode != OUTPUT ) return false;
  if ( channels != 1 && channels != 2 ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
    return false;
  }
  ss.channels = channels;

  if ( firstChannel != 0 ) return false;

  // The rate must be one of the fixed set in SUPPORTED_SAMPLERATES.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      sr_found = true;
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
      break;
    }
  }
  if ( !sr_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
    return false;
  }

  // Map the requested format onto a native PulseAudio format; anything
  // unlisted is handled by converting through float32.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      sf_found = true;
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
      break;
    }
  }
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  }

  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex, reuse the output-side device buffer when it is
      // already large enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  if ( !stream_.apiHandle ) {
    // First call: create the shared handle and its condition variable.
    // NOTE(review): this declaration shadows the outer 'pah'; the outer
    // pointer is re-fetched from stream_.apiHandle just below, so
    // behavior is correct, but the shadowing is easy to misread.
    PulseAudioHandle *pah = new PulseAudioHandle;
    if ( !pah ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
      goto error;
    }

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
      goto error;
    }
  }
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  int error;
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
  switch ( mode ) {
  case INPUT:
    // NOTE(review): only fragsize and maxlength are initialized here;
    // the remaining pa_buffer_attr fields are left indeterminate --
    // presumably unread for record streams, but explicit initialization
    // would be safer; confirm against the PulseAudio docs.
    pa_buffer_attr buffer_attr;
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;

    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
      goto error;
    }
    break;
  case OUTPUT:
    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
      goto error;
    }
    break;
  default:
    goto error;
  }

  // Track which direction(s) are open; opening the same direction twice
  // is an error, opening the other direction promotes to DUPLEX.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    goto error;
  else
    stream_.mode = DUPLEX;

  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;
    stream_.callbackInfo.isRunning = true;
    if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
      goto error;
    }
  }

  stream_.state = STREAM_STOPPED;
  return true;

 error:
  // NOTE(review): the handle is only freed when isRunning is true; on a
  // failure before the callback thread is set up, 'pah' appears to be
  // leaked (and its condition variable not destroyed) -- confirm
  // against upstream fixes before changing.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}
8504
8505 //******************** End of __LINUX_PULSE__ *********************//
8506 #endif
8507
8508 #if defined(__LINUX_OSS__)
8509
8510 #include <unistd.h>
8511 #include <sys/ioctl.h>
8512 #include <unistd.h>
8513 #include <fcntl.h>
8514 #include <sys/soundcard.h>
8515 #include <errno.h>
8516 #include <math.h>
8517
8518 static void *ossCallbackHandler(void * ptr);
8519
8520 // A structure to hold various information related to the OSS API
8521 // implementation.
struct OssHandle {
  int id[2]; // device ids -- presumably [0] = output, [1] = input, matching the other backends; confirm in probeDeviceOpen
  bool xrun[2];     // under/overrun flags, one per direction
  bool triggered;   // whether the device trigger has been fired
  pthread_cond_t runnable; // start/stop handshake for the callback thread

  // Zero/false-initialize everything; the condition variable is set up
  // elsewhere (not in this constructor).
  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8531
// Default constructor: all stream bookkeeping is initialized by the
// RtApi base class, so there is nothing OSS-specific to do.
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
8536
~RtApiOss()8537 RtApiOss :: ~RtApiOss()
8538 {
8539 if ( stream_.state != STREAM_CLOSED ) closeStream();
8540 }
8541
getDeviceCount(void)8542 unsigned int RtApiOss :: getDeviceCount( void )
8543 {
8544 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8545 if ( mixerfd == -1 ) {
8546 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8547 error( RtAudioError::WARNING );
8548 return 0;
8549 }
8550
8551 oss_sysinfo sysinfo;
8552 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8553 close( mixerfd );
8554 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8555 error( RtAudioError::WARNING );
8556 return 0;
8557 }
8558
8559 close( mixerfd );
8560 return sysinfo.numaudios;
8561 }
8562
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
{
  // Probe one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl, filling
  // in channel counts, native formats and supported sample rates.
  // info.probed remains false on any failure.
  RtAudio::DeviceInfo info;
  info.probed = false;

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
    return info;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
    return info;
  }

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe channels
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // Duplex channel count is the smaller of the two directions.  (The
    // PCM_CAP_DUPLEX re-test below is redundant -- it is already
    // established by the enclosing condition.)
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  }

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
#ifdef AFMT_FLOAT
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
#endif
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The driver reports an explicit rate list; keep those that RtAudio
    // also knows about.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );

          // Prefer the highest standard rate not exceeding 48 kHz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];

          break;
        }
      }
    }
  }
  else {
    // Check min and max rate values;
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        // Prefer the highest standard rate not exceeding 48 kHz.
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }
  else {
    info.probed = true;
    info.name = ainfo.name;
  }

  return info;
}
8681
8682
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8683 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8684 unsigned int firstChannel, unsigned int sampleRate,
8685 RtAudioFormat format, unsigned int *bufferSize,
8686 RtAudio::StreamOptions *options )
8687 {
8688 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8689 if ( mixerfd == -1 ) {
8690 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8691 return FAILURE;
8692 }
8693
8694 oss_sysinfo sysinfo;
8695 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8696 if ( result == -1 ) {
8697 close( mixerfd );
8698 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8699 return FAILURE;
8700 }
8701
8702 unsigned nDevices = sysinfo.numaudios;
8703 if ( nDevices == 0 ) {
8704 // This should not happen because a check is made before this function is called.
8705 close( mixerfd );
8706 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8707 return FAILURE;
8708 }
8709
8710 if ( device >= nDevices ) {
8711 // This should not happen because a check is made before this function is called.
8712 close( mixerfd );
8713 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8714 return FAILURE;
8715 }
8716
8717 oss_audioinfo ainfo;
8718 ainfo.dev = device;
8719 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8720 close( mixerfd );
8721 if ( result == -1 ) {
8722 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8723 errorText_ = errorStream_.str();
8724 return FAILURE;
8725 }
8726
8727 // Check if device supports input or output
8728 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8729 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8730 if ( mode == OUTPUT )
8731 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8732 else
8733 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8734 errorText_ = errorStream_.str();
8735 return FAILURE;
8736 }
8737
8738 int flags = 0;
8739 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8740 if ( mode == OUTPUT )
8741 flags |= O_WRONLY;
8742 else { // mode == INPUT
8743 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8744 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8745 close( handle->id[0] );
8746 handle->id[0] = 0;
8747 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8748 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8749 errorText_ = errorStream_.str();
8750 return FAILURE;
8751 }
8752 // Check that the number previously set channels is the same.
8753 if ( stream_.nUserChannels[0] != channels ) {
8754 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8755 errorText_ = errorStream_.str();
8756 return FAILURE;
8757 }
8758 flags |= O_RDWR;
8759 }
8760 else
8761 flags |= O_RDONLY;
8762 }
8763
8764 // Set exclusive access if specified.
8765 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8766
8767 // Try to open the device.
8768 int fd;
8769 fd = open( ainfo.devnode, flags, 0 );
8770 if ( fd == -1 ) {
8771 if ( errno == EBUSY )
8772 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8773 else
8774 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8775 errorText_ = errorStream_.str();
8776 return FAILURE;
8777 }
8778
8779 // For duplex operation, specifically set this mode (this doesn't seem to work).
8780 /*
8781 if ( flags | O_RDWR ) {
8782 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8783 if ( result == -1) {
8784 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8785 errorText_ = errorStream_.str();
8786 return FAILURE;
8787 }
8788 }
8789 */
8790
8791 // Check the device channel support.
8792 stream_.nUserChannels[mode] = channels;
8793 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8794 close( fd );
8795 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8796 errorText_ = errorStream_.str();
8797 return FAILURE;
8798 }
8799
8800 // Set the number of channels.
8801 int deviceChannels = channels + firstChannel;
8802 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8803 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8804 close( fd );
8805 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8806 errorText_ = errorStream_.str();
8807 return FAILURE;
8808 }
8809 stream_.nDeviceChannels[mode] = deviceChannels;
8810
8811 // Get the data format mask
8812 int mask;
8813 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8814 if ( result == -1 ) {
8815 close( fd );
8816 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8817 errorText_ = errorStream_.str();
8818 return FAILURE;
8819 }
8820
8821 // Determine how to set the device format.
8822 stream_.userFormat = format;
8823 int deviceFormat = -1;
8824 stream_.doByteSwap[mode] = false;
8825 if ( format == RTAUDIO_SINT8 ) {
8826 if ( mask & AFMT_S8 ) {
8827 deviceFormat = AFMT_S8;
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8829 }
8830 }
8831 else if ( format == RTAUDIO_SINT16 ) {
8832 if ( mask & AFMT_S16_NE ) {
8833 deviceFormat = AFMT_S16_NE;
8834 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8835 }
8836 else if ( mask & AFMT_S16_OE ) {
8837 deviceFormat = AFMT_S16_OE;
8838 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8839 stream_.doByteSwap[mode] = true;
8840 }
8841 }
8842 else if ( format == RTAUDIO_SINT24 ) {
8843 if ( mask & AFMT_S24_NE ) {
8844 deviceFormat = AFMT_S24_NE;
8845 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8846 }
8847 else if ( mask & AFMT_S24_OE ) {
8848 deviceFormat = AFMT_S24_OE;
8849 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8850 stream_.doByteSwap[mode] = true;
8851 }
8852 }
8853 else if ( format == RTAUDIO_SINT32 ) {
8854 if ( mask & AFMT_S32_NE ) {
8855 deviceFormat = AFMT_S32_NE;
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8857 }
8858 else if ( mask & AFMT_S32_OE ) {
8859 deviceFormat = AFMT_S32_OE;
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8861 stream_.doByteSwap[mode] = true;
8862 }
8863 }
8864
8865 if ( deviceFormat == -1 ) {
8866 // The user requested format is not natively supported by the device.
8867 if ( mask & AFMT_S16_NE ) {
8868 deviceFormat = AFMT_S16_NE;
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8870 }
8871 else if ( mask & AFMT_S32_NE ) {
8872 deviceFormat = AFMT_S32_NE;
8873 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8874 }
8875 else if ( mask & AFMT_S24_NE ) {
8876 deviceFormat = AFMT_S24_NE;
8877 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8878 }
8879 else if ( mask & AFMT_S16_OE ) {
8880 deviceFormat = AFMT_S16_OE;
8881 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8882 stream_.doByteSwap[mode] = true;
8883 }
8884 else if ( mask & AFMT_S32_OE ) {
8885 deviceFormat = AFMT_S32_OE;
8886 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8887 stream_.doByteSwap[mode] = true;
8888 }
8889 else if ( mask & AFMT_S24_OE ) {
8890 deviceFormat = AFMT_S24_OE;
8891 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8892 stream_.doByteSwap[mode] = true;
8893 }
8894 else if ( mask & AFMT_S8) {
8895 deviceFormat = AFMT_S8;
8896 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8897 }
8898 }
8899
8900 if ( stream_.deviceFormat[mode] == 0 ) {
8901 // This really shouldn't happen ...
8902 close( fd );
8903 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8904 errorText_ = errorStream_.str();
8905 return FAILURE;
8906 }
8907
8908 // Set the data format.
8909 int temp = deviceFormat;
8910 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8911 if ( result == -1 || deviceFormat != temp ) {
8912 close( fd );
8913 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8914 errorText_ = errorStream_.str();
8915 return FAILURE;
8916 }
8917
8918 // Attempt to set the buffer size. According to OSS, the minimum
8919 // number of buffers is two. The supposed minimum buffer size is 16
8920 // bytes, so that will be our lower bound. The argument to this
8921 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8922 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8923 // We'll check the actual value used near the end of the setup
8924 // procedure.
8925 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8926 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8927 int buffers = 0;
8928 if ( options ) buffers = options->numberOfBuffers;
8929 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8930 if ( buffers < 2 ) buffers = 3;
8931 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8932 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8933 if ( result == -1 ) {
8934 close( fd );
8935 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8936 errorText_ = errorStream_.str();
8937 return FAILURE;
8938 }
8939 stream_.nBuffers = buffers;
8940
8941 // Save buffer size (in sample frames).
8942 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8943 stream_.bufferSize = *bufferSize;
8944
8945 // Set the sample rate.
8946 int srate = sampleRate;
8947 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8948 if ( result == -1 ) {
8949 close( fd );
8950 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8951 errorText_ = errorStream_.str();
8952 return FAILURE;
8953 }
8954
8955 // Verify the sample rate setup worked.
8956 if ( abs( srate - (int)sampleRate ) > 100 ) {
8957 close( fd );
8958 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8959 errorText_ = errorStream_.str();
8960 return FAILURE;
8961 }
8962 stream_.sampleRate = sampleRate;
8963
8964 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8965 // We're doing duplex setup here.
8966 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8967 stream_.nDeviceChannels[0] = deviceChannels;
8968 }
8969
8970 // Set interleaving parameters.
8971 stream_.userInterleaved = true;
8972 stream_.deviceInterleaved[mode] = true;
8973 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8974 stream_.userInterleaved = false;
8975
8976 // Set flags for buffer conversion
8977 stream_.doConvertBuffer[mode] = false;
8978 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8979 stream_.doConvertBuffer[mode] = true;
8980 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8981 stream_.doConvertBuffer[mode] = true;
8982 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8983 stream_.nUserChannels[mode] > 1 )
8984 stream_.doConvertBuffer[mode] = true;
8985
8986 // Allocate the stream handles if necessary and then save.
8987 if ( stream_.apiHandle == 0 ) {
8988 try {
8989 handle = new OssHandle;
8990 }
8991 catch ( std::bad_alloc& ) {
8992 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8993 goto error;
8994 }
8995
8996 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8997 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8998 goto error;
8999 }
9000
9001 stream_.apiHandle = (void *) handle;
9002 }
9003 else {
9004 handle = (OssHandle *) stream_.apiHandle;
9005 }
9006 handle->id[mode] = fd;
9007
9008 // Allocate necessary internal buffers.
9009 unsigned long bufferBytes;
9010 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9011 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9012 if ( stream_.userBuffer[mode] == NULL ) {
9013 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9014 goto error;
9015 }
9016
9017 if ( stream_.doConvertBuffer[mode] ) {
9018
9019 bool makeBuffer = true;
9020 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9021 if ( mode == INPUT ) {
9022 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9023 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9024 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9025 }
9026 }
9027
9028 if ( makeBuffer ) {
9029 bufferBytes *= *bufferSize;
9030 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9031 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9032 if ( stream_.deviceBuffer == NULL ) {
9033 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9034 goto error;
9035 }
9036 }
9037 }
9038
9039 stream_.device[mode] = device;
9040 stream_.state = STREAM_STOPPED;
9041
9042 // Setup the buffer conversion information structure.
9043 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9044
9045 // Setup thread if necessary.
9046 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9047 // We had already set up an output stream.
9048 stream_.mode = DUPLEX;
9049 if ( stream_.device[0] == device ) handle->id[0] = fd;
9050 }
9051 else {
9052 stream_.mode = mode;
9053
9054 // Setup callback thread.
9055 stream_.callbackInfo.object = (void *) this;
9056
9057 // Set the thread attributes for joinable and realtime scheduling
9058 // priority. The higher priority will only take affect if the
9059 // program is run as root or suid.
9060 pthread_attr_t attr;
9061 pthread_attr_init( &attr );
9062 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9063 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9064 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9065 struct sched_param param;
9066 int priority = options->priority;
9067 int min = sched_get_priority_min( SCHED_RR );
9068 int max = sched_get_priority_max( SCHED_RR );
9069 if ( priority < min ) priority = min;
9070 else if ( priority > max ) priority = max;
9071 param.sched_priority = priority;
9072 pthread_attr_setschedparam( &attr, ¶m );
9073 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9074 }
9075 else
9076 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9077 #else
9078 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9079 #endif
9080
9081 stream_.callbackInfo.isRunning = true;
9082 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9083 pthread_attr_destroy( &attr );
9084 if ( result ) {
9085 stream_.callbackInfo.isRunning = false;
9086 errorText_ = "RtApiOss::error creating callback thread!";
9087 goto error;
9088 }
9089 }
9090
9091 return SUCCESS;
9092
9093 error:
9094 if ( handle ) {
9095 pthread_cond_destroy( &handle->runnable );
9096 if ( handle->id[0] ) close( handle->id[0] );
9097 if ( handle->id[1] ) close( handle->id[1] );
9098 delete handle;
9099 stream_.apiHandle = 0;
9100 }
9101
9102 for ( int i=0; i<2; i++ ) {
9103 if ( stream_.userBuffer[i] ) {
9104 free( stream_.userBuffer[i] );
9105 stream_.userBuffer[i] = 0;
9106 }
9107 }
9108
9109 if ( stream_.deviceBuffer ) {
9110 free( stream_.deviceBuffer );
9111 stream_.deviceBuffer = 0;
9112 }
9113
9114 return FAILURE;
9115 }
9116
void RtApiOss :: closeStream()
{
  // Tear down an open stream: stop the callback thread, halt any running
  // device I/O, release the OSS handle (and its file descriptors), and
  // free all stream buffers.  Warns (no-op) if no stream is open.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Tell ossCallbackHandler's loop to exit, then wake a thread that may be
  // blocked on the condition variable in callbackEvent() before joining.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    // Halt the device before closing; the output descriptor (id[0]) also
    // serves duplex streams, otherwise halt the input descriptor.
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    // A descriptor value of 0 means "not open" in this backend.
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free per-direction user buffers (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9164
startStream()9165 void RtApiOss :: startStream()
9166 {
9167 verifyStream();
9168 if ( stream_.state == STREAM_RUNNING ) {
9169 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9170 error( RtAudioError::WARNING );
9171 return;
9172 }
9173
9174 MUTEX_LOCK( &stream_.mutex );
9175
9176 stream_.state = STREAM_RUNNING;
9177
9178 // No need to do anything else here ... OSS automatically starts
9179 // when fed samples.
9180
9181 MUTEX_UNLOCK( &stream_.mutex );
9182
9183 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9184 pthread_cond_signal( &handle->runnable );
9185 }
9186
stopStream()9187 void RtApiOss :: stopStream()
9188 {
9189 verifyStream();
9190 if ( stream_.state == STREAM_STOPPED ) {
9191 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9192 error( RtAudioError::WARNING );
9193 return;
9194 }
9195
9196 MUTEX_LOCK( &stream_.mutex );
9197
9198 // The state might change while waiting on a mutex.
9199 if ( stream_.state == STREAM_STOPPED ) {
9200 MUTEX_UNLOCK( &stream_.mutex );
9201 return;
9202 }
9203
9204 int result = 0;
9205 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9206 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9207
9208 // Flush the output with zeros a few times.
9209 char *buffer;
9210 int samples;
9211 RtAudioFormat format;
9212
9213 if ( stream_.doConvertBuffer[0] ) {
9214 buffer = stream_.deviceBuffer;
9215 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9216 format = stream_.deviceFormat[0];
9217 }
9218 else {
9219 buffer = stream_.userBuffer[0];
9220 samples = stream_.bufferSize * stream_.nUserChannels[0];
9221 format = stream_.userFormat;
9222 }
9223
9224 memset( buffer, 0, samples * formatBytes(format) );
9225 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9226 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9227 if ( result == -1 ) {
9228 errorText_ = "RtApiOss::stopStream: audio write error.";
9229 error( RtAudioError::WARNING );
9230 }
9231 }
9232
9233 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9234 if ( result == -1 ) {
9235 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9236 errorText_ = errorStream_.str();
9237 goto unlock;
9238 }
9239 handle->triggered = false;
9240 }
9241
9242 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9243 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9244 if ( result == -1 ) {
9245 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9246 errorText_ = errorStream_.str();
9247 goto unlock;
9248 }
9249 }
9250
9251 unlock:
9252 stream_.state = STREAM_STOPPED;
9253 MUTEX_UNLOCK( &stream_.mutex );
9254
9255 if ( result != -1 ) return;
9256 error( RtAudioError::SYSTEM_ERROR );
9257 }
9258
abortStream()9259 void RtApiOss :: abortStream()
9260 {
9261 verifyStream();
9262 if ( stream_.state == STREAM_STOPPED ) {
9263 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9264 error( RtAudioError::WARNING );
9265 return;
9266 }
9267
9268 MUTEX_LOCK( &stream_.mutex );
9269
9270 // The state might change while waiting on a mutex.
9271 if ( stream_.state == STREAM_STOPPED ) {
9272 MUTEX_UNLOCK( &stream_.mutex );
9273 return;
9274 }
9275
9276 int result = 0;
9277 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9278 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9279 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9280 if ( result == -1 ) {
9281 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9282 errorText_ = errorStream_.str();
9283 goto unlock;
9284 }
9285 handle->triggered = false;
9286 }
9287
9288 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9289 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9290 if ( result == -1 ) {
9291 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9292 errorText_ = errorStream_.str();
9293 goto unlock;
9294 }
9295 }
9296
9297 unlock:
9298 stream_.state = STREAM_STOPPED;
9299 MUTEX_UNLOCK( &stream_.mutex );
9300
9301 if ( result != -1 ) return;
9302 error( RtAudioError::SYSTEM_ERROR );
9303 }
9304
void RtApiOss :: callbackEvent()
{
  // One iteration of the audio callback loop: invoke the user callback,
  // then write output samples to and/or read input samples from the OSS
  // device, with any required format conversion and byte swapping.
  // Called repeatedly from ossCallbackHandler().
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream()/closeStream() signals the condition.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any under/overflow flags recorded by earlier I/O.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: disable triggers, prime the output with one
      // buffer, then enable input+output simultaneously so both
      // directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // Callback return value 1 requests a graceful stop (drain output).
  if ( doStopStream == 1 ) this->stopStream();
}
9436
ossCallbackHandler(void * ptr)9437 static void *ossCallbackHandler( void *ptr )
9438 {
9439 CallbackInfo *info = (CallbackInfo *) ptr;
9440 RtApiOss *object = (RtApiOss *) info->object;
9441 bool *isRunning = &info->isRunning;
9442
9443 while ( *isRunning == true ) {
9444 pthread_testcancel();
9445 object->callbackEvent();
9446 }
9447
9448 pthread_exit( NULL );
9449 }
9450
9451 //******************** End of __LINUX_OSS__ *********************//
9452 #endif
9453
9454
9455 // *************************************************** //
9456 //
9457 // Protected common (OS-independent) RtAudio methods.
9458 //
9459 // *************************************************** //
9460
9461 // This method can be modified to control the behavior of error
9462 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatch: forwards errorText_ to a user-registered
  // error callback if one exists; otherwise prints warnings to stderr
  // (when enabled) and throws RtAudioError for non-warning types.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Reentrancy guard: if we are already inside error(), drop nested reports.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message before abortStream() can overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9492
verifyStream()9493 void RtApi :: verifyStream()
9494 {
9495 if ( stream_.state == STREAM_CLOSED ) {
9496 errorText_ = "RtApi:: a stream is not open!";
9497 error( RtAudioError::INVALID_USE );
9498 }
9499 }
9500
clearStreamInfo()9501 void RtApi :: clearStreamInfo()
9502 {
9503 stream_.mode = UNINITIALIZED;
9504 stream_.state = STREAM_CLOSED;
9505 stream_.sampleRate = 0;
9506 stream_.bufferSize = 0;
9507 stream_.nBuffers = 0;
9508 stream_.userFormat = 0;
9509 stream_.userInterleaved = true;
9510 stream_.streamTime = 0.0;
9511 stream_.apiHandle = 0;
9512 stream_.deviceBuffer = 0;
9513 stream_.callbackInfo.callback = 0;
9514 stream_.callbackInfo.userData = 0;
9515 stream_.callbackInfo.isRunning = false;
9516 stream_.callbackInfo.errorCallback = 0;
9517 for ( int i=0; i<2; i++ ) {
9518 stream_.device[i] = 11111;
9519 stream_.doConvertBuffer[i] = false;
9520 stream_.deviceInterleaved[i] = true;
9521 stream_.doByteSwap[i] = false;
9522 stream_.nUserChannels[i] = 0;
9523 stream_.nDeviceChannels[i] = 0;
9524 stream_.channelOffset[i] = 0;
9525 stream_.deviceFormat[i] = 0;
9526 stream_.latency[i] = 0;
9527 stream_.userBuffer[i] = 0;
9528 stream_.convertInfo[i].channels = 0;
9529 stream_.convertInfo[i].inJump = 0;
9530 stream_.convertInfo[i].outJump = 0;
9531 stream_.convertInfo[i].inFormat = 0;
9532 stream_.convertInfo[i].outFormat = 0;
9533 stream_.convertInfo[i].inOffset.clear();
9534 stream_.convertInfo[i].outOffset.clear();
9535 }
9536 }
9537
formatBytes(RtAudioFormat format)9538 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9539 {
9540 if ( format == RTAUDIO_SINT16 )
9541 return 2;
9542 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9543 return 4;
9544 else if ( format == RTAUDIO_FLOAT64 )
9545 return 8;
9546 else if ( format == RTAUDIO_SINT24 )
9547 return 3;
9548 else if ( format == RTAUDIO_SINT8 )
9549 return 1;
9550
9551 errorText_ = "RtApi::formatBytes: undefined format.";
9552 error( RtAudioError::WARNING );
9553
9554 return 0;
9555 }
9556
setConvertInfo(StreamMode mode,unsigned int firstChannel)9557 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9558 {
9559 if ( mode == INPUT ) { // convert device to user buffer
9560 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9561 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9562 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9563 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9564 }
9565 else { // convert user to device buffer
9566 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9567 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9568 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9569 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9570 }
9571
9572 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9573 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9574 else
9575 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9576
9577 // Set up the interleave/deinterleave offsets.
9578 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9579 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9580 ( mode == INPUT && stream_.userInterleaved ) ) {
9581 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9582 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9583 stream_.convertInfo[mode].outOffset.push_back( k );
9584 stream_.convertInfo[mode].inJump = 1;
9585 }
9586 }
9587 else {
9588 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9589 stream_.convertInfo[mode].inOffset.push_back( k );
9590 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9591 stream_.convertInfo[mode].outJump = 1;
9592 }
9593 }
9594 }
9595 else { // no (de)interleaving
9596 if ( stream_.userInterleaved ) {
9597 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9598 stream_.convertInfo[mode].inOffset.push_back( k );
9599 stream_.convertInfo[mode].outOffset.push_back( k );
9600 }
9601 }
9602 else {
9603 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9604 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9605 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9606 stream_.convertInfo[mode].inJump = 1;
9607 stream_.convertInfo[mode].outJump = 1;
9608 }
9609 }
9610 }
9611
9612 // Add channel offset.
9613 if ( firstChannel > 0 ) {
9614 if ( stream_.deviceInterleaved[mode] ) {
9615 if ( mode == OUTPUT ) {
9616 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9617 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9618 }
9619 else {
9620 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9621 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9622 }
9623 }
9624 else {
9625 if ( mode == OUTPUT ) {
9626 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9627 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9628 }
9629 else {
9630 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9631 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9632 }
9633 }
9634 }
9635 }
9636
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)9637 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9638 {
9639 // This function does format conversion, input/output channel compensation, and
9640 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9641 // the lower three bytes of a 32-bit integer.
9642
9643 // Clear our device buffer when in/out duplex device channels are different
9644 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9645 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9646 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9647
9648 int j;
9649 if (info.outFormat == RTAUDIO_FLOAT64) {
9650 Float64 scale;
9651 Float64 *out = (Float64 *)outBuffer;
9652
9653 if (info.inFormat == RTAUDIO_SINT8) {
9654 signed char *in = (signed char *)inBuffer;
9655 scale = 1.0 / 127.5;
9656 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9657 for (j=0; j<info.channels; j++) {
9658 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9659 out[info.outOffset[j]] += 0.5;
9660 out[info.outOffset[j]] *= scale;
9661 }
9662 in += info.inJump;
9663 out += info.outJump;
9664 }
9665 }
9666 else if (info.inFormat == RTAUDIO_SINT16) {
9667 Int16 *in = (Int16 *)inBuffer;
9668 scale = 1.0 / 32767.5;
9669 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9670 for (j=0; j<info.channels; j++) {
9671 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9672 out[info.outOffset[j]] += 0.5;
9673 out[info.outOffset[j]] *= scale;
9674 }
9675 in += info.inJump;
9676 out += info.outJump;
9677 }
9678 }
9679 else if (info.inFormat == RTAUDIO_SINT24) {
9680 Int24 *in = (Int24 *)inBuffer;
9681 scale = 1.0 / 8388607.5;
9682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9683 for (j=0; j<info.channels; j++) {
9684 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9685 out[info.outOffset[j]] += 0.5;
9686 out[info.outOffset[j]] *= scale;
9687 }
9688 in += info.inJump;
9689 out += info.outJump;
9690 }
9691 }
9692 else if (info.inFormat == RTAUDIO_SINT32) {
9693 Int32 *in = (Int32 *)inBuffer;
9694 scale = 1.0 / 2147483647.5;
9695 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9696 for (j=0; j<info.channels; j++) {
9697 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9698 out[info.outOffset[j]] += 0.5;
9699 out[info.outOffset[j]] *= scale;
9700 }
9701 in += info.inJump;
9702 out += info.outJump;
9703 }
9704 }
9705 else if (info.inFormat == RTAUDIO_FLOAT32) {
9706 Float32 *in = (Float32 *)inBuffer;
9707 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9708 for (j=0; j<info.channels; j++) {
9709 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9710 }
9711 in += info.inJump;
9712 out += info.outJump;
9713 }
9714 }
9715 else if (info.inFormat == RTAUDIO_FLOAT64) {
9716 // Channel compensation and/or (de)interleaving only.
9717 Float64 *in = (Float64 *)inBuffer;
9718 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9719 for (j=0; j<info.channels; j++) {
9720 out[info.outOffset[j]] = in[info.inOffset[j]];
9721 }
9722 in += info.inJump;
9723 out += info.outJump;
9724 }
9725 }
9726 }
9727 else if (info.outFormat == RTAUDIO_FLOAT32) {
9728 Float32 scale;
9729 Float32 *out = (Float32 *)outBuffer;
9730
9731 if (info.inFormat == RTAUDIO_SINT8) {
9732 signed char *in = (signed char *)inBuffer;
9733 scale = (Float32) ( 1.0 / 127.5 );
9734 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9735 for (j=0; j<info.channels; j++) {
9736 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9737 out[info.outOffset[j]] += 0.5;
9738 out[info.outOffset[j]] *= scale;
9739 }
9740 in += info.inJump;
9741 out += info.outJump;
9742 }
9743 }
9744 else if (info.inFormat == RTAUDIO_SINT16) {
9745 Int16 *in = (Int16 *)inBuffer;
9746 scale = (Float32) ( 1.0 / 32767.5 );
9747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9748 for (j=0; j<info.channels; j++) {
9749 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9750 out[info.outOffset[j]] += 0.5;
9751 out[info.outOffset[j]] *= scale;
9752 }
9753 in += info.inJump;
9754 out += info.outJump;
9755 }
9756 }
9757 else if (info.inFormat == RTAUDIO_SINT24) {
9758 Int24 *in = (Int24 *)inBuffer;
9759 scale = (Float32) ( 1.0 / 8388607.5 );
9760 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9761 for (j=0; j<info.channels; j++) {
9762 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9763 out[info.outOffset[j]] += 0.5;
9764 out[info.outOffset[j]] *= scale;
9765 }
9766 in += info.inJump;
9767 out += info.outJump;
9768 }
9769 }
9770 else if (info.inFormat == RTAUDIO_SINT32) {
9771 Int32 *in = (Int32 *)inBuffer;
9772 scale = (Float32) ( 1.0 / 2147483647.5 );
9773 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9774 for (j=0; j<info.channels; j++) {
9775 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9776 out[info.outOffset[j]] += 0.5;
9777 out[info.outOffset[j]] *= scale;
9778 }
9779 in += info.inJump;
9780 out += info.outJump;
9781 }
9782 }
9783 else if (info.inFormat == RTAUDIO_FLOAT32) {
9784 // Channel compensation and/or (de)interleaving only.
9785 Float32 *in = (Float32 *)inBuffer;
9786 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9787 for (j=0; j<info.channels; j++) {
9788 out[info.outOffset[j]] = in[info.inOffset[j]];
9789 }
9790 in += info.inJump;
9791 out += info.outJump;
9792 }
9793 }
9794 else if (info.inFormat == RTAUDIO_FLOAT64) {
9795 Float64 *in = (Float64 *)inBuffer;
9796 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9797 for (j=0; j<info.channels; j++) {
9798 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9799 }
9800 in += info.inJump;
9801 out += info.outJump;
9802 }
9803 }
9804 }
9805 else if (info.outFormat == RTAUDIO_SINT32) {
9806 Int32 *out = (Int32 *)outBuffer;
9807 if (info.inFormat == RTAUDIO_SINT8) {
9808 signed char *in = (signed char *)inBuffer;
9809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9810 for (j=0; j<info.channels; j++) {
9811 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9812 out[info.outOffset[j]] <<= 24;
9813 }
9814 in += info.inJump;
9815 out += info.outJump;
9816 }
9817 }
9818 else if (info.inFormat == RTAUDIO_SINT16) {
9819 Int16 *in = (Int16 *)inBuffer;
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9821 for (j=0; j<info.channels; j++) {
9822 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9823 out[info.outOffset[j]] <<= 16;
9824 }
9825 in += info.inJump;
9826 out += info.outJump;
9827 }
9828 }
9829 else if (info.inFormat == RTAUDIO_SINT24) {
9830 Int24 *in = (Int24 *)inBuffer;
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9832 for (j=0; j<info.channels; j++) {
9833 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9834 out[info.outOffset[j]] <<= 8;
9835 }
9836 in += info.inJump;
9837 out += info.outJump;
9838 }
9839 }
9840 else if (info.inFormat == RTAUDIO_SINT32) {
9841 // Channel compensation and/or (de)interleaving only.
9842 Int32 *in = (Int32 *)inBuffer;
9843 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9844 for (j=0; j<info.channels; j++) {
9845 out[info.outOffset[j]] = in[info.inOffset[j]];
9846 }
9847 in += info.inJump;
9848 out += info.outJump;
9849 }
9850 }
9851 else if (info.inFormat == RTAUDIO_FLOAT32) {
9852 Float32 *in = (Float32 *)inBuffer;
9853 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9854 for (j=0; j<info.channels; j++) {
9855 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9856 }
9857 in += info.inJump;
9858 out += info.outJump;
9859 }
9860 }
9861 else if (info.inFormat == RTAUDIO_FLOAT64) {
9862 Float64 *in = (Float64 *)inBuffer;
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9864 for (j=0; j<info.channels; j++) {
9865 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9866 }
9867 in += info.inJump;
9868 out += info.outJump;
9869 }
9870 }
9871 }
9872 else if (info.outFormat == RTAUDIO_SINT24) {
9873 Int24 *out = (Int24 *)outBuffer;
9874 if (info.inFormat == RTAUDIO_SINT8) {
9875 signed char *in = (signed char *)inBuffer;
9876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9877 for (j=0; j<info.channels; j++) {
9878 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9879 //out[info.outOffset[j]] <<= 16;
9880 }
9881 in += info.inJump;
9882 out += info.outJump;
9883 }
9884 }
9885 else if (info.inFormat == RTAUDIO_SINT16) {
9886 Int16 *in = (Int16 *)inBuffer;
9887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9888 for (j=0; j<info.channels; j++) {
9889 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9890 //out[info.outOffset[j]] <<= 8;
9891 }
9892 in += info.inJump;
9893 out += info.outJump;
9894 }
9895 }
9896 else if (info.inFormat == RTAUDIO_SINT24) {
9897 // Channel compensation and/or (de)interleaving only.
9898 Int24 *in = (Int24 *)inBuffer;
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9900 for (j=0; j<info.channels; j++) {
9901 out[info.outOffset[j]] = in[info.inOffset[j]];
9902 }
9903 in += info.inJump;
9904 out += info.outJump;
9905 }
9906 }
9907 else if (info.inFormat == RTAUDIO_SINT32) {
9908 Int32 *in = (Int32 *)inBuffer;
9909 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9910 for (j=0; j<info.channels; j++) {
9911 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9912 //out[info.outOffset[j]] >>= 8;
9913 }
9914 in += info.inJump;
9915 out += info.outJump;
9916 }
9917 }
9918 else if (info.inFormat == RTAUDIO_FLOAT32) {
9919 Float32 *in = (Float32 *)inBuffer;
9920 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9921 for (j=0; j<info.channels; j++) {
9922 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9923 }
9924 in += info.inJump;
9925 out += info.outJump;
9926 }
9927 }
9928 else if (info.inFormat == RTAUDIO_FLOAT64) {
9929 Float64 *in = (Float64 *)inBuffer;
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9931 for (j=0; j<info.channels; j++) {
9932 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9933 }
9934 in += info.inJump;
9935 out += info.outJump;
9936 }
9937 }
9938 }
9939 else if (info.outFormat == RTAUDIO_SINT16) {
9940 Int16 *out = (Int16 *)outBuffer;
9941 if (info.inFormat == RTAUDIO_SINT8) {
9942 signed char *in = (signed char *)inBuffer;
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9944 for (j=0; j<info.channels; j++) {
9945 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
9946 out[info.outOffset[j]] <<= 8;
9947 }
9948 in += info.inJump;
9949 out += info.outJump;
9950 }
9951 }
9952 else if (info.inFormat == RTAUDIO_SINT16) {
9953 // Channel compensation and/or (de)interleaving only.
9954 Int16 *in = (Int16 *)inBuffer;
9955 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9956 for (j=0; j<info.channels; j++) {
9957 out[info.outOffset[j]] = in[info.inOffset[j]];
9958 }
9959 in += info.inJump;
9960 out += info.outJump;
9961 }
9962 }
9963 else if (info.inFormat == RTAUDIO_SINT24) {
9964 Int24 *in = (Int24 *)inBuffer;
9965 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9966 for (j=0; j<info.channels; j++) {
9967 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
9968 }
9969 in += info.inJump;
9970 out += info.outJump;
9971 }
9972 }
9973 else if (info.inFormat == RTAUDIO_SINT32) {
9974 Int32 *in = (Int32 *)inBuffer;
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9976 for (j=0; j<info.channels; j++) {
9977 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
9978 }
9979 in += info.inJump;
9980 out += info.outJump;
9981 }
9982 }
9983 else if (info.inFormat == RTAUDIO_FLOAT32) {
9984 Float32 *in = (Float32 *)inBuffer;
9985 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9986 for (j=0; j<info.channels; j++) {
9987 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9988 }
9989 in += info.inJump;
9990 out += info.outJump;
9991 }
9992 }
9993 else if (info.inFormat == RTAUDIO_FLOAT64) {
9994 Float64 *in = (Float64 *)inBuffer;
9995 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9996 for (j=0; j<info.channels; j++) {
9997 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9998 }
9999 in += info.inJump;
10000 out += info.outJump;
10001 }
10002 }
10003 }
10004 else if (info.outFormat == RTAUDIO_SINT8) {
10005 signed char *out = (signed char *)outBuffer;
10006 if (info.inFormat == RTAUDIO_SINT8) {
10007 // Channel compensation and/or (de)interleaving only.
10008 signed char *in = (signed char *)inBuffer;
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10010 for (j=0; j<info.channels; j++) {
10011 out[info.outOffset[j]] = in[info.inOffset[j]];
10012 }
10013 in += info.inJump;
10014 out += info.outJump;
10015 }
10016 }
10017 if (info.inFormat == RTAUDIO_SINT16) {
10018 Int16 *in = (Int16 *)inBuffer;
10019 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10020 for (j=0; j<info.channels; j++) {
10021 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10022 }
10023 in += info.inJump;
10024 out += info.outJump;
10025 }
10026 }
10027 else if (info.inFormat == RTAUDIO_SINT24) {
10028 Int24 *in = (Int24 *)inBuffer;
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10030 for (j=0; j<info.channels; j++) {
10031 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10032 }
10033 in += info.inJump;
10034 out += info.outJump;
10035 }
10036 }
10037 else if (info.inFormat == RTAUDIO_SINT32) {
10038 Int32 *in = (Int32 *)inBuffer;
10039 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10040 for (j=0; j<info.channels; j++) {
10041 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10042 }
10043 in += info.inJump;
10044 out += info.outJump;
10045 }
10046 }
10047 else if (info.inFormat == RTAUDIO_FLOAT32) {
10048 Float32 *in = (Float32 *)inBuffer;
10049 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10050 for (j=0; j<info.channels; j++) {
10051 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10052 }
10053 in += info.inJump;
10054 out += info.outJump;
10055 }
10056 }
10057 else if (info.inFormat == RTAUDIO_FLOAT64) {
10058 Float64 *in = (Float64 *)inBuffer;
10059 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10060 for (j=0; j<info.channels; j++) {
10061 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10062 }
10063 in += info.inJump;
10064 out += info.outJump;
10065 }
10066 }
10067 }
10068 }
10069
10070 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10071 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10072 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10073
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10074 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10075 {
10076 char val;
10077 char *ptr;
10078
10079 ptr = buffer;
10080 if ( format == RTAUDIO_SINT16 ) {
10081 for ( unsigned int i=0; i<samples; i++ ) {
10082 // Swap 1st and 2nd bytes.
10083 val = *(ptr);
10084 *(ptr) = *(ptr+1);
10085 *(ptr+1) = val;
10086
10087 // Increment 2 bytes.
10088 ptr += 2;
10089 }
10090 }
10091 else if ( format == RTAUDIO_SINT32 ||
10092 format == RTAUDIO_FLOAT32 ) {
10093 for ( unsigned int i=0; i<samples; i++ ) {
10094 // Swap 1st and 4th bytes.
10095 val = *(ptr);
10096 *(ptr) = *(ptr+3);
10097 *(ptr+3) = val;
10098
10099 // Swap 2nd and 3rd bytes.
10100 ptr += 1;
10101 val = *(ptr);
10102 *(ptr) = *(ptr+1);
10103 *(ptr+1) = val;
10104
10105 // Increment 3 more bytes.
10106 ptr += 3;
10107 }
10108 }
10109 else if ( format == RTAUDIO_SINT24 ) {
10110 for ( unsigned int i=0; i<samples; i++ ) {
10111 // Swap 1st and 3rd bytes.
10112 val = *(ptr);
10113 *(ptr) = *(ptr+2);
10114 *(ptr+2) = val;
10115
10116 // Increment 2 more bytes.
10117 ptr += 2;
10118 }
10119 }
10120 else if ( format == RTAUDIO_FLOAT64 ) {
10121 for ( unsigned int i=0; i<samples; i++ ) {
10122 // Swap 1st and 8th bytes
10123 val = *(ptr);
10124 *(ptr) = *(ptr+7);
10125 *(ptr+7) = val;
10126
10127 // Swap 2nd and 7th bytes
10128 ptr += 1;
10129 val = *(ptr);
10130 *(ptr) = *(ptr+5);
10131 *(ptr+5) = val;
10132
10133 // Swap 3rd and 6th bytes
10134 ptr += 1;
10135 val = *(ptr);
10136 *(ptr) = *(ptr+3);
10137 *(ptr+3) = val;
10138
10139 // Swap 4th and 5th bytes
10140 ptr += 1;
10141 val = *(ptr);
10142 *(ptr) = *(ptr+1);
10143 *(ptr+1) = val;
10144
10145 // Increment 5 more bytes.
10146 ptr += 5;
10147 }
10148 }
10149 }
10150
10151 // Indentation settings for Vim and Emacs
10152 //
10153 // Local Variables:
10154 // c-basic-offset: 2
10155 // indent-tabs-mode: nil
10156 // End:
10157 //
10158 // vim: et sts=2 sw=2
10159
10160