1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
14
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
22
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
25
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
30
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39 /************************************************************************/
40
41 // RtAudio: Version 5.0.0
42
#include "RtAudio.h"
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
50
// Static variable definitions.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
// Canonical list of sample rates (Hz) used when probing devices that
// report a continuous range of supported rates rather than discrete
// values (see the CoreAudio getDeviceInfo() below for an example use).
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
57
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows builds: implement the generic mutex macros with Win32
// critical sections.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

#include "tchar.h"

static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

static std::string convertCharPointerToStdString(const wchar_t *text)
{
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__UNIX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__) || defined(__HAIKU__)
// pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
// No supported threading API compiled in.
// NOTE(review): MUTEX_LOCK/MUTEX_UNLOCK are intentionally absent in this
// fallback — presumably unused in dummy builds; confirm before relying on it.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
89
90 // *************************************************** //
91 //
92 // RtAudio definitions.
93 //
94 // *************************************************** //
95
// Return the library version string (RTAUDIO_VERSION is defined in RtAudio.h).
std::string RtAudio :: getVersion( void )
{
  return RTAUDIO_VERSION;
}
100
// Fill 'apis' with the set of audio APIs that were compiled into this
// build, in priority order. Any previous contents of 'apis' are discarded.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__UNIX_PULSE__)
  apis.push_back( UNIX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
135
// Instantiate the RtApi subclass corresponding to 'api', replacing any
// previously held instance. If the requested API was not compiled into
// this build, rtapi_ is left null (callers check for that).
void RtAudio :: openRtApi( RtAudio::Api api )
{
  // Release any existing implementation before creating a new one.
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__UNIX_PULSE__)
  if ( api == UNIX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
179
// Construct an RtAudio instance. If 'api' names a specific compiled-in
// API, use it; otherwise (or if unavailable) search the compiled APIs in
// priority order for one that reports at least one device. Throws
// RtAudioError if no API support was compiled in at all.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
212
~RtAudio()213 RtAudio :: ~RtAudio()
214 {
215 if ( rtapi_ )
216 delete rtapi_;
217 }
218
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioBufferSizeCallback bufSizeCallback,RtAudioErrorCallback errorCallback)219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioBufferSizeCallback bufSizeCallback,
226 RtAudioErrorCallback errorCallback )
227 {
228 return rtapi_->openStream( outputParameters, inputParameters, format,
229 sampleRate, bufferFrames, callback,
230 userData, options, bufSizeCallback, errorCallback );
231 }
232
233 // *************************************************** //
234 //
235 // Public RtApi definitions (see end of file for
236 // private or protected utility functions).
237 //
238 // *************************************************** //
239
// Base-class constructor: put the stream bookkeeping structure into a
// known closed/empty state and initialize the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  // userBuffer indices follow the latency[] convention used elsewhere in
  // this file: [0] = output, [1] = input.
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
251
// Base-class destructor: release the stream mutex created in the constructor.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
256
// Validate the caller's parameters and open a stream via the subclass's
// probeDeviceOpen(). On any validation failure the method reports through
// error() and returns with the stream still closed. On success the stream
// state becomes STREAM_STOPPED and the callback pointers are stored for
// later use by the API-specific callback machinery.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioBufferSizeCallback bufSizeCallback,
                          RtAudioErrorCallback errorCallback )
{
  // Only one stream per RtApi instance may be open at a time.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // At least one direction must be specified (the dummy API is exempt).
  if ( oParams == NULL && iParams == NULL && getCurrentApi() != RtAudio::RTAUDIO_DUMMY ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Validate the requested device indices against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the successful output-side open before reporting the failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callbacks as opaque pointers for the API-specific code.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.bufSizeCallback = (void *) bufSizeCallback;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual number of buffers chosen by the implementation.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
351
// Default-device query stub; subclasses with a native notion of a default
// input device override this. Returns index 0 otherwise.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
357
// Default-device query stub; subclasses with a native notion of a default
// output device override this. Returns index 0 otherwise.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
363
// Stream-close stub. Every concrete API subclass must override this to
// release its platform resources; the base implementation does nothing.
void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}
369
// Device-open stub. Concrete API subclasses must override this to do the
// real work of opening/configuring a device; the base version always fails.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
378
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  // Advance the stream clock by one buffer's worth of seconds.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember the wall-clock time of this tick so getStreamTime() can
  // interpolate between ticks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
391
getStreamLatency(void)392 long RtApi :: getStreamLatency( void )
393 {
394 verifyStream();
395
396 long totalLatency = 0;
397 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
398 totalLatency = stream_.latency[0];
399 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
400 totalLatency += stream_.latency[1];
401
402 return totalLatency;
403 }
404
// Return the stream time in seconds. When gettimeofday() is available,
// the value is refined by adding the wall-clock time elapsed since the
// last tickStreamTime() call.
double RtApi :: getStreamTime( void )
{
  verifyStream(); // reports an error if no stream is open

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // No interpolation when stopped or before the first tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
427
// Set the stream time to 'time' (seconds). Negative values are ignored.
void RtApi :: setStreamTime( double time )
{
  verifyStream(); // reports an error if no stream is open

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Restart interpolation from this wall-clock instant.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
438
// Return the sample rate (Hz) of the currently open stream.
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream(); // reports an error if no stream is open

  return stream_.sampleRate;
}
445
446
447 // *************************************************** //
448 //
449 // OS/API-specific methods.
450 //
451 // *************************************************** //
452
453 #if defined(__MACOSX_CORE__)
454
455 // The OS X CoreAudio API is designed to use a separate callback
456 // procedure for each of its audio devices. A single RtAudio duplex
457 // stream using two different devices is supported here, though it
458 // cannot be guaranteed to always behave correctly because we cannot
459 // synchronize these two callbacks.
460 //
461 // A property listener is installed for over/underrun information.
462 // However, no functionality is currently provided to allow property
463 // listeners to trigger user handlers because it is unclear what could
464 // be done if a critical stream parameter (buffer size, sample rate,
465 // device disconnect) notification arrived. The listeners entail
466 // quite a bit of extra code and most likely, a user program wouldn't
467 // be prepared for the result anyway. However, we do provide a flag
468 // to the client callback function to inform of an over/underrun.
469
// A structure to hold various information related to the CoreAudio API
// implementation.
struct CoreHandle {
  AudioDeviceID id[2]; // device ids (index 0 = output, 1 = input, per the xrun listener below)
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2]; // IOProc ids for the 10.5+ AudioDeviceCreateIOProcID API
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // over/underrun flags, set by xrunListener()
  char *deviceBuffer; // NOTE(review): presumably an intermediate conversion buffer — confirm in probeDeviceOpen
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Initialize to a safe default: one stream per direction, no devices,
  // no pending xruns, no allocated buffer.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
488
// CoreAudio constructor: on 10.6+ this disables the implicit run loop so
// that device property queries/updates behave correctly.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
507
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
515
getDeviceCount(void)516 unsigned int RtApiCore :: getDeviceCount( void )
517 {
518 // Find out how many audio devices there are, if any.
519 UInt32 dataSize;
520 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
521 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
522 if ( result != noErr ) {
523 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
524 error( RtAudioError::WARNING );
525 return 0;
526 }
527
528 return dataSize / sizeof( AudioDeviceID );
529 }
530
getDefaultInputDevice(void)531 unsigned int RtApiCore :: getDefaultInputDevice( void )
532 {
533 unsigned int nDevices = getDeviceCount();
534 if ( nDevices <= 1 ) return 0;
535
536 AudioDeviceID id;
537 UInt32 dataSize = sizeof( AudioDeviceID );
538 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
539 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
540 if ( result != noErr ) {
541 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
542 error( RtAudioError::WARNING );
543 return 0;
544 }
545
546 dataSize *= nDevices;
547 AudioDeviceID deviceList[ nDevices ];
548 property.mSelector = kAudioHardwarePropertyDevices;
549 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
550 if ( result != noErr ) {
551 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
552 error( RtAudioError::WARNING );
553 return 0;
554 }
555
556 for ( unsigned int i=0; i<nDevices; i++ )
557 if ( id == deviceList[i] ) return i;
558
559 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
560 error( RtAudioError::WARNING );
561 return 0;
562 }
563
getDefaultOutputDevice(void)564 unsigned int RtApiCore :: getDefaultOutputDevice( void )
565 {
566 unsigned int nDevices = getDeviceCount();
567 if ( nDevices <= 1 ) return 0;
568
569 AudioDeviceID id;
570 UInt32 dataSize = sizeof( AudioDeviceID );
571 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
572 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
573 if ( result != noErr ) {
574 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
575 error( RtAudioError::WARNING );
576 return 0;
577 }
578
579 dataSize = sizeof( AudioDeviceID ) * nDevices;
580 AudioDeviceID deviceList[ nDevices ];
581 property.mSelector = kAudioHardwarePropertyDevices;
582 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
583 if ( result != noErr ) {
584 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
585 error( RtAudioError::WARNING );
586 return 0;
587 }
588
589 for ( unsigned int i=0; i<nDevices; i++ )
590 if ( id == deviceList[i] ) return i;
591
592 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
593 error( RtAudioError::WARNING );
594 return 0;
595 }
596
// Probe CoreAudio device 'device' and fill in an RtAudio::DeviceInfo
// structure: name, channel counts, supported sample rates, preferred
// rate, native format, and default-device flags. On any failure the
// partially filled structure is returned with info.probed == false and a
// warning is issued via error().
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Translate the caller's device index into a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name: "<manufacturer>: <device name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Allocate a worst-case buffer (3 bytes per UTF-16 unit plus the null)
  // and copy the CFString out in the appropriate encoding.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information: sum channels over all input streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      // Discrete rate: record it directly.
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest supported rate that is <= 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      // Continuous range: shrink [minimumRate, maximumRate] to the
      // intersection of all reported ranges.
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // For a continuous range, offer the standard rates that fall inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
837
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)838 static OSStatus callbackHandler( AudioDeviceID inDevice,
839 const AudioTimeStamp* /*inNow*/,
840 const AudioBufferList* inInputData,
841 const AudioTimeStamp* /*inInputTime*/,
842 AudioBufferList* outOutputData,
843 const AudioTimeStamp* /*inOutputTime*/,
844 void* infoPointer )
845 {
846 CallbackInfo *info = (CallbackInfo *) infoPointer;
847
848 RtApiCore *object = (RtApiCore *) info->object;
849 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
850 return kAudioHardwareUnspecifiedError;
851 else
852 return kAudioHardwareNoError;
853 }
854
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)855 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
856 UInt32 nAddresses,
857 const AudioObjectPropertyAddress properties[],
858 void* handlePointer )
859 {
860 CoreHandle *handle = (CoreHandle *) handlePointer;
861 for ( UInt32 i=0; i<nAddresses; i++ ) {
862 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
863 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
864 handle->xrun[1] = true;
865 else
866 handle->xrun[0] = true;
867 }
868 }
869
870 return kAudioHardwareNoError;
871 }
872
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)873 static OSStatus rateListener( AudioObjectID inDevice,
874 UInt32 /*nAddresses*/,
875 const AudioObjectPropertyAddress /*properties*/[],
876 void* ratePointer )
877 {
878 Float64 *rate = (Float64 *) ratePointer;
879 UInt32 dataSize = sizeof( Float64 );
880 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
881 kAudioObjectPropertyScopeGlobal,
882 kAudioObjectPropertyElementMaster };
883 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
884 return kAudioHardwareNoError;
885 }
886
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)887 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
888 unsigned int firstChannel, unsigned int sampleRate,
889 RtAudioFormat format, unsigned int *bufferSize,
890 RtAudio::StreamOptions *options )
891 {
892 // Get device ID
893 unsigned int nDevices = getDeviceCount();
894 if ( nDevices == 0 ) {
895 // This should not happen because a check is made before this function is called.
896 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 return FAILURE;
898 }
899
900 if ( device >= nDevices ) {
901 // This should not happen because a check is made before this function is called.
902 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 return FAILURE;
904 }
905
906 AudioDeviceID deviceList[ nDevices ];
907 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
908 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
909 kAudioObjectPropertyScopeGlobal,
910 kAudioObjectPropertyElementMaster };
911 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
912 0, NULL, &dataSize, (void *) &deviceList );
913 if ( result != noErr ) {
914 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 return FAILURE;
916 }
917
918 AudioDeviceID id = deviceList[ device ];
919
920 // Setup for stream mode.
921 bool isInput = false;
922 if ( mode == INPUT ) {
923 isInput = true;
924 property.mScope = kAudioDevicePropertyScopeInput;
925 }
926 else
927 property.mScope = kAudioDevicePropertyScopeOutput;
928
929 // Get the stream "configuration".
930 AudioBufferList *bufferList = nil;
931 dataSize = 0;
932 property.mSelector = kAudioDevicePropertyStreamConfiguration;
933 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
934 if ( result != noErr || dataSize == 0 ) {
935 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
936 errorText_ = errorStream_.str();
937 return FAILURE;
938 }
939
940 // Allocate the AudioBufferList.
941 bufferList = (AudioBufferList *) malloc( dataSize );
942 if ( bufferList == NULL ) {
943 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 return FAILURE;
945 }
946
947 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
948 if (result != noErr || dataSize == 0) {
949 free( bufferList );
950 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
951 errorText_ = errorStream_.str();
952 return FAILURE;
953 }
954
955 // Search for one or more streams that contain the desired number of
956 // channels. CoreAudio devices can have an arbitrary number of
957 // streams and each stream can have an arbitrary number of channels.
958 // For each stream, a single buffer of interleaved samples is
959 // provided. RtAudio prefers the use of one stream of interleaved
960 // data or multiple consecutive single-channel streams. However, we
961 // now support multiple consecutive multi-channel streams of
962 // interleaved data as well.
963 UInt32 iStream, offsetCounter = firstChannel;
964 UInt32 nStreams = bufferList->mNumberBuffers;
965 bool monoMode = false;
966 bool foundStream = false;
967
968 // First check that the device supports the requested number of
969 // channels.
970 UInt32 deviceChannels = 0;
971 for ( iStream=0; iStream<nStreams; iStream++ )
972 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
973
974 if ( deviceChannels < ( channels + firstChannel ) ) {
975 free( bufferList );
976 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
977 errorText_ = errorStream_.str();
978 return FAILURE;
979 }
980
981 // Look for a single stream meeting our needs.
982 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
983 for ( iStream=0; iStream<nStreams; iStream++ ) {
984 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
985 if ( streamChannels >= channels + offsetCounter ) {
986 firstStream = iStream;
987 channelOffset = offsetCounter;
988 foundStream = true;
989 break;
990 }
991 if ( streamChannels > offsetCounter ) break;
992 offsetCounter -= streamChannels;
993 }
994
995 // If we didn't find a single stream above, then we should be able
996 // to meet the channel specification with multiple streams.
997 if ( foundStream == false ) {
998 monoMode = true;
999 offsetCounter = firstChannel;
1000 for ( iStream=0; iStream<nStreams; iStream++ ) {
1001 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1002 if ( streamChannels > offsetCounter ) break;
1003 offsetCounter -= streamChannels;
1004 }
1005
1006 firstStream = iStream;
1007 channelOffset = offsetCounter;
1008 Int32 channelCounter = channels + offsetCounter - streamChannels;
1009
1010 if ( streamChannels > 1 ) monoMode = false;
1011 while ( channelCounter > 0 ) {
1012 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1013 if ( streamChannels > 1 ) monoMode = false;
1014 channelCounter -= streamChannels;
1015 streamCount++;
1016 }
1017 }
1018
1019 free( bufferList );
1020
1021 // Determine the buffer size.
1022 AudioValueRange bufferRange;
1023 dataSize = sizeof( AudioValueRange );
1024 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1025 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1026
1027 if ( result != noErr ) {
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1029 errorText_ = errorStream_.str();
1030 return FAILURE;
1031 }
1032
1033 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1035 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1036
1037 // Set the buffer size. For multiple streams, I'm assuming we only
1038 // need to make this setting for the master channel.
1039 UInt32 theSize = (UInt32) *bufferSize;
1040 dataSize = sizeof( UInt32 );
1041 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1042 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1043
1044 if ( result != noErr ) {
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1046 errorText_ = errorStream_.str();
1047 return FAILURE;
1048 }
1049
1050 // If attempting to setup a duplex stream, the bufferSize parameter
1051 // MUST be the same in both directions!
1052 *bufferSize = theSize;
1053 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1054 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1055 errorText_ = errorStream_.str();
1056 return FAILURE;
1057 }
1058
1059 stream_.bufferSize = *bufferSize;
1060 stream_.nBuffers = 1;
1061
1062 // Try to set "hog" mode ... it's not clear to me this is working.
1063 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1064 pid_t hog_pid;
1065 dataSize = sizeof( hog_pid );
1066 property.mSelector = kAudioDevicePropertyHogMode;
1067 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1068 if ( result != noErr ) {
1069 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1070 errorText_ = errorStream_.str();
1071 return FAILURE;
1072 }
1073
1074 if ( hog_pid != getpid() ) {
1075 hog_pid = getpid();
1076 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1077 if ( result != noErr ) {
1078 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1079 errorText_ = errorStream_.str();
1080 return FAILURE;
1081 }
1082 }
1083 }
1084
1085 // Check and if necessary, change the sample rate for the device.
1086 Float64 nominalRate;
1087 dataSize = sizeof( Float64 );
1088 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1089 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1090 if ( result != noErr ) {
1091 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1092 errorText_ = errorStream_.str();
1093 return FAILURE;
1094 }
1095
1096 // Only change the sample rate if off by more than 1 Hz.
1097 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1098
1099 // Set a property listener for the sample rate change
1100 Float64 reportedRate = 0.0;
1101 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1102 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1103 if ( result != noErr ) {
1104 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1105 errorText_ = errorStream_.str();
1106 return FAILURE;
1107 }
1108
1109 nominalRate = (Float64) sampleRate;
1110 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1111 if ( result != noErr ) {
1112 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1113 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1114 errorText_ = errorStream_.str();
1115 return FAILURE;
1116 }
1117
1118 // Now wait until the reported nominal rate is what we just set.
1119 UInt32 microCounter = 0;
1120 while ( reportedRate != nominalRate ) {
1121 microCounter += 5000;
1122 if ( microCounter > 5000000 ) break;
1123 usleep( 5000 );
1124 }
1125
1126 // Remove the property listener.
1127 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1128
1129 if ( microCounter > 5000000 ) {
1130 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1131 errorText_ = errorStream_.str();
1132 return FAILURE;
1133 }
1134 }
1135
1136 // Now set the stream format for all streams. Also, check the
1137 // physical format of the device and change that if necessary.
1138 AudioStreamBasicDescription description;
1139 dataSize = sizeof( AudioStreamBasicDescription );
1140 property.mSelector = kAudioStreamPropertyVirtualFormat;
1141 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1142 if ( result != noErr ) {
1143 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1144 errorText_ = errorStream_.str();
1145 return FAILURE;
1146 }
1147
1148 // Set the sample rate and data format id. However, only make the
1149 // change if the sample rate is not within 1.0 of the desired
1150 // rate and the format is not linear pcm.
1151 bool updateFormat = false;
1152 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1153 description.mSampleRate = (Float64) sampleRate;
1154 updateFormat = true;
1155 }
1156
1157 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1158 description.mFormatID = kAudioFormatLinearPCM;
1159 updateFormat = true;
1160 }
1161
1162 if ( updateFormat ) {
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1164 if ( result != noErr ) {
1165 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1166 errorText_ = errorStream_.str();
1167 return FAILURE;
1168 }
1169 }
1170
1171 // Now check the physical format.
1172 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1173 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1174 if ( result != noErr ) {
1175 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1176 errorText_ = errorStream_.str();
1177 return FAILURE;
1178 }
1179
1180 //std::cout << "Current physical stream format:" << std::endl;
1181 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1182 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1183 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1184 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1185
1186 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1187 description.mFormatID = kAudioFormatLinearPCM;
1188 //description.mSampleRate = (Float64) sampleRate;
1189 AudioStreamBasicDescription testDescription = description;
1190 UInt32 formatFlags;
1191
1192 // We'll try higher bit rates first and then work our way down.
1193 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1194 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1199 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1200 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1201 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1203 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1204 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1205 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1206
1207 bool setPhysicalFormat = false;
1208 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1209 testDescription = description;
1210 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1211 testDescription.mFormatFlags = physicalFormats[i].second;
1212 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1213 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1214 else
1215 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1216 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1218 if ( result == noErr ) {
1219 setPhysicalFormat = true;
1220 //std::cout << "Updated physical stream format:" << std::endl;
1221 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1222 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1223 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1224 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1225 break;
1226 }
1227 }
1228
1229 if ( !setPhysicalFormat ) {
1230 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1231 errorText_ = errorStream_.str();
1232 return FAILURE;
1233 }
1234 } // done setting virtual/physical formats.
1235
1236 // Get the stream / device latency.
1237 UInt32 latency;
1238 dataSize = sizeof( UInt32 );
1239 property.mSelector = kAudioDevicePropertyLatency;
1240 if ( AudioObjectHasProperty( id, &property ) == true ) {
1241 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1242 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1243 else {
1244 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1245 errorText_ = errorStream_.str();
1246 error( RtAudioError::WARNING );
1247 }
1248 }
1249
1250 // Byte-swapping: According to AudioHardware.h, the stream data will
1251 // always be presented in native-endian format, so we should never
1252 // need to byte swap.
1253 stream_.doByteSwap[mode] = false;
1254
1255 // From the CoreAudio documentation, PCM data must be supplied as
1256 // 32-bit floats.
1257 stream_.userFormat = format;
1258 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1259
1260 if ( streamCount == 1 )
1261 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1262 else // multiple streams
1263 stream_.nDeviceChannels[mode] = channels;
1264 stream_.nUserChannels[mode] = channels;
1265 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1266 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1267 else stream_.userInterleaved = true;
1268 stream_.deviceInterleaved[mode] = true;
1269 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1270
1271 // Set flags for buffer conversion.
1272 stream_.doConvertBuffer[mode] = false;
1273 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1274 stream_.doConvertBuffer[mode] = true;
1275 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1276 stream_.doConvertBuffer[mode] = true;
1277 if ( streamCount == 1 ) {
1278 if ( stream_.nUserChannels[mode] > 1 &&
1279 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1280 stream_.doConvertBuffer[mode] = true;
1281 }
1282 else if ( monoMode && stream_.userInterleaved )
1283 stream_.doConvertBuffer[mode] = true;
1284
1285 // Allocate our CoreHandle structure for the stream.
1286 CoreHandle *handle = 0;
1287 if ( stream_.apiHandle == 0 ) {
1288 try {
1289 handle = new CoreHandle;
1290 }
1291 catch ( std::bad_alloc& ) {
1292 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 goto error;
1294 }
1295
1296 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1297 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1298 goto error;
1299 }
1300 stream_.apiHandle = (void *) handle;
1301 }
1302 else
1303 handle = (CoreHandle *) stream_.apiHandle;
1304 handle->iStream[mode] = firstStream;
1305 handle->nStreams[mode] = streamCount;
1306 handle->id[mode] = id;
1307
1308 // Allocate necessary internal buffers.
1309 unsigned long bufferBytes;
1310 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1311 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1312 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1313 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1314 if ( stream_.userBuffer[mode] == NULL ) {
1315 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 goto error;
1317 }
1318
1319 // If possible, we will make use of the CoreAudio stream buffers as
1320 // "device buffers". However, we can't do this if using multiple
1321 // streams.
1322 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1323
1324 bool makeBuffer = true;
1325 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1326 if ( mode == INPUT ) {
1327 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1328 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1329 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1330 }
1331 }
1332
1333 if ( makeBuffer ) {
1334 bufferBytes *= *bufferSize;
1335 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1336 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1337 if ( stream_.deviceBuffer == NULL ) {
1338 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1339 goto error;
1340 }
1341 }
1342 }
1343
1344 stream_.sampleRate = sampleRate;
1345 stream_.device[mode] = device;
1346 stream_.state = STREAM_STOPPED;
1347 stream_.callbackInfo.object = (void *) this;
1348
1349 // Setup the buffer conversion information structure.
1350 if ( stream_.doConvertBuffer[mode] ) {
1351 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1352 else setConvertInfo( mode, channelOffset );
1353 }
1354
1355 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1356 // Only one callback procedure per device.
1357 stream_.mode = DUPLEX;
1358 else {
1359 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1360 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1361 #else
1362 // deprecated in favor of AudioDeviceCreateIOProcID()
1363 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1364 #endif
1365 if ( result != noErr ) {
1366 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1367 errorText_ = errorStream_.str();
1368 goto error;
1369 }
1370 if ( stream_.mode == OUTPUT && mode == INPUT )
1371 stream_.mode = DUPLEX;
1372 else
1373 stream_.mode = mode;
1374 }
1375
1376 // Setup the device property listener for over/underload.
1377 property.mSelector = kAudioDeviceProcessorOverload;
1378 property.mScope = kAudioObjectPropertyScopeGlobal;
1379 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1380
1381 return SUCCESS;
1382
1383 error:
1384 if ( handle ) {
1385 pthread_cond_destroy( &handle->condition );
1386 delete handle;
1387 stream_.apiHandle = 0;
1388 }
1389
1390 for ( int i=0; i<2; i++ ) {
1391 if ( stream_.userBuffer[i] ) {
1392 free( stream_.userBuffer[i] );
1393 stream_.userBuffer[i] = 0;
1394 }
1395 }
1396
1397 if ( stream_.deviceBuffer ) {
1398 free( stream_.deviceBuffer );
1399 stream_.deviceBuffer = 0;
1400 }
1401
1402 stream_.state = STREAM_CLOSED;
1403 return FAILURE;
1404 }
1405
closeStream(void)1406 void RtApiCore :: closeStream( void )
1407 {
1408 if ( stream_.state == STREAM_CLOSED ) {
1409 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1410 error( RtAudioError::WARNING );
1411 return;
1412 }
1413
1414 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1415 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1416 if (handle) {
1417 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1418 kAudioObjectPropertyScopeGlobal,
1419 kAudioObjectPropertyElementMaster };
1420
1421 property.mSelector = kAudioDeviceProcessorOverload;
1422 property.mScope = kAudioObjectPropertyScopeGlobal;
1423 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1424 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1425 error( RtAudioError::WARNING );
1426 }
1427 }
1428 if ( stream_.state == STREAM_RUNNING )
1429 AudioDeviceStop( handle->id[0], callbackHandler );
1430 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1431 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1432 #else
1433 // deprecated in favor of AudioDeviceDestroyIOProcID()
1434 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1435 #endif
1436 }
1437
1438 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1439 if (handle) {
1440 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1441 kAudioObjectPropertyScopeGlobal,
1442 kAudioObjectPropertyElementMaster };
1443
1444 property.mSelector = kAudioDeviceProcessorOverload;
1445 property.mScope = kAudioObjectPropertyScopeGlobal;
1446 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1447 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1448 error( RtAudioError::WARNING );
1449 }
1450 }
1451 if ( stream_.state == STREAM_RUNNING )
1452 AudioDeviceStop( handle->id[1], callbackHandler );
1453 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1454 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1455 #else
1456 // deprecated in favor of AudioDeviceDestroyIOProcID()
1457 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1458 #endif
1459 }
1460
1461 for ( int i=0; i<2; i++ ) {
1462 if ( stream_.userBuffer[i] ) {
1463 free( stream_.userBuffer[i] );
1464 stream_.userBuffer[i] = 0;
1465 }
1466 }
1467
1468 if ( stream_.deviceBuffer ) {
1469 free( stream_.deviceBuffer );
1470 stream_.deviceBuffer = 0;
1471 }
1472
1473 // Destroy pthread condition variable.
1474 pthread_cond_destroy( &handle->condition );
1475 delete handle;
1476 stream_.apiHandle = 0;
1477
1478 stream_.mode = UNINITIALIZED;
1479 stream_.state = STREAM_CLOSED;
1480 }
1481
startStream(void)1482 void RtApiCore :: startStream( void )
1483 {
1484 verifyStream();
1485 if ( stream_.state == STREAM_RUNNING ) {
1486 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1487 error( RtAudioError::WARNING );
1488 return;
1489 }
1490
1491 OSStatus result = noErr;
1492 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1493 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1494
1495 result = AudioDeviceStart( handle->id[0], callbackHandler );
1496 if ( result != noErr ) {
1497 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1498 errorText_ = errorStream_.str();
1499 goto unlock;
1500 }
1501 }
1502
1503 if ( stream_.mode == INPUT ||
1504 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1505
1506 result = AudioDeviceStart( handle->id[1], callbackHandler );
1507 if ( result != noErr ) {
1508 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1509 errorText_ = errorStream_.str();
1510 goto unlock;
1511 }
1512 }
1513
1514 handle->drainCounter = 0;
1515 handle->internalDrain = false;
1516 stream_.state = STREAM_RUNNING;
1517
1518 unlock:
1519 if ( result == noErr ) return;
1520 error( RtAudioError::SYSTEM_ERROR );
1521 }
1522
stopStream(void)1523 void RtApiCore :: stopStream( void )
1524 {
1525 verifyStream();
1526 if ( stream_.state == STREAM_STOPPED ) {
1527 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1528 error( RtAudioError::WARNING );
1529 return;
1530 }
1531
1532 OSStatus result = noErr;
1533 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1534 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1535
1536 if ( handle->drainCounter == 0 ) {
1537 handle->drainCounter = 2;
1538 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1539 }
1540
1541 result = AudioDeviceStop( handle->id[0], callbackHandler );
1542 if ( result != noErr ) {
1543 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1544 errorText_ = errorStream_.str();
1545 goto unlock;
1546 }
1547 }
1548
1549 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1550
1551 result = AudioDeviceStop( handle->id[1], callbackHandler );
1552 if ( result != noErr ) {
1553 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1554 errorText_ = errorStream_.str();
1555 goto unlock;
1556 }
1557 }
1558
1559 stream_.state = STREAM_STOPPED;
1560
1561 unlock:
1562 if ( result == noErr ) return;
1563 error( RtAudioError::SYSTEM_ERROR );
1564 }
1565
abortStream(void)1566 void RtApiCore :: abortStream( void )
1567 {
1568 verifyStream();
1569 if ( stream_.state == STREAM_STOPPED ) {
1570 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1571 error( RtAudioError::WARNING );
1572 return;
1573 }
1574
1575 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1576 handle->drainCounter = 2;
1577
1578 stopStream();
1579 }
1580
1581 // This function will be called by a spawned thread when the user
1582 // callback function signals that the stream should be stopped or
1583 // aborted. It is better to handle it this way because the
1584 // callbackEvent() function probably should return before the AudioDeviceStop()
1585 // function is called.
coreStopStream(void * ptr)1586 static void *coreStopStream( void *ptr )
1587 {
1588 CallbackInfo *info = (CallbackInfo *) ptr;
1589 RtApiCore *object = (RtApiCore *) info->object;
1590
1591 object->stopStream();
1592 pthread_exit( NULL );
1593
1594 return NULL;
1595 }
1596
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  // CoreAudio I/O handler, invoked once per hardware cycle.  In duplex
  // mode with two different devices it is invoked separately for each
  // device; deviceId identifies which side this invocation serves.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Stop was requested from inside the user callback: spawn a thread
      // so this callback can return before AudioDeviceStop() is called.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false; // report each underflow only once
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false; // report each overflow only once
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) { // user requested an immediate abort (no drain)
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) { // user requested stop after draining output
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One device stream per channel: copy each channel block separately.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the distance between consecutive channels of one frame.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Copy one stream's worth of channels, frame by frame.  The inner
          // loop index intentionally shadows the outer stream index.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // Declared separately from its assignment so the goto above does not
  // jump over an initialization.
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One device stream per channel: copy each channel block separately.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset is the distance between consecutive channels of one frame.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // Copy one stream's worth of channels, frame by frame.  The inner
          // loop index intentionally shadows the outer stream index.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  return SUCCESS;
}
1856
getErrorCode(OSStatus code)1857 const char* RtApiCore :: getErrorCode( OSStatus code )
1858 {
1859 switch( code ) {
1860
1861 case kAudioHardwareNotRunningError:
1862 return "kAudioHardwareNotRunningError";
1863
1864 case kAudioHardwareUnspecifiedError:
1865 return "kAudioHardwareUnspecifiedError";
1866
1867 case kAudioHardwareUnknownPropertyError:
1868 return "kAudioHardwareUnknownPropertyError";
1869
1870 case kAudioHardwareBadPropertySizeError:
1871 return "kAudioHardwareBadPropertySizeError";
1872
1873 case kAudioHardwareIllegalOperationError:
1874 return "kAudioHardwareIllegalOperationError";
1875
1876 case kAudioHardwareBadObjectError:
1877 return "kAudioHardwareBadObjectError";
1878
1879 case kAudioHardwareBadDeviceError:
1880 return "kAudioHardwareBadDeviceError";
1881
1882 case kAudioHardwareBadStreamError:
1883 return "kAudioHardwareBadStreamError";
1884
1885 case kAudioHardwareUnsupportedOperationError:
1886 return "kAudioHardwareUnsupportedOperationError";
1887
1888 case kAudioDeviceUnsupportedFormatError:
1889 return "kAudioDeviceUnsupportedFormatError";
1890
1891 case kAudioDevicePermissionsError:
1892 return "kAudioDevicePermissionsError";
1893
1894 default:
1895 return "CoreAudio unknown error";
1896 }
1897 }
1898
1899 //******************** End of __MACOSX_CORE__ *********************//
1900 #endif
1901
1902 #if defined(__UNIX_JACK__)
1903
1904 // JACK is a low-latency audio server, originally written for the
1905 // GNU/Linux operating system and now also ported to OS-X. It can
1906 // connect a number of different applications to an audio device, as
1907 // well as allowing them to share audio between themselves.
1908 //
1909 // When using JACK with RtAudio, "devices" refer to JACK clients that
1910 // have ports connected to the server. The JACK server is typically
1911 // started in a terminal as follows:
1912 //
//    jackd -d alsa -d hw:0
1914 //
1915 // or through an interface program such as qjackctl. Many of the
1916 // parameters normally set for a stream are fixed by the JACK server
1917 // and can be specified when the JACK server is started. In
1918 // particular,
1919 //
//    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1921 //
1922 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1923 // frames, and number of buffers = 4. Once the server is running, it
1924 // is not possible to override these values. If the values are not
1925 // specified in the command-line, the JACK server uses default values.
1926 //
1927 // The JACK server does not have to be running when an instance of
1928 // RtApiJack is created, though the function getDeviceCount() will
1929 // report 0 devices found until JACK has been started. When no
1930 // devices are available (i.e., the JACK server is not running), a
1931 // stream cannot be opened.
1932
1933 #include "jackbridge/JackBridge.hpp"
1934 #include <unistd.h>
1935 #include <cstdio>
1936
1937 // A structure to hold various information related to the Jack API
1938 // implementation.
1939 struct JackHandle {
1940 jack_client_t *client;
1941 jack_port_t **ports[2];
1942 bool xrun[2];
1943 pthread_cond_t condition;
1944 int drainCounter; // Tracks callback counts when draining
1945 bool internalDrain; // Indicates if stop is initiated from callback or not.
1946
JackHandleJackHandle1947 JackHandle()
1948 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1949 };
1950
RtApiJack()1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 }
1955
~RtApiJack()1956 RtApiJack :: ~RtApiJack()
1957 {
1958 if ( stream_.state != STREAM_CLOSED ) closeStream();
1959 }
1960
getDeviceCount(void)1961 unsigned int RtApiJack :: getDeviceCount( void )
1962 {
1963 return 2;
1964 }
1965
getDeviceInfo(unsigned int device)1966 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
1967 {
1968 static RtAudio::DeviceInfo devInfo[3];
1969
1970 if (! devInfo[0].probed)
1971 {
1972 devInfo[0].probed = devInfo[1].probed = true;
1973 devInfo[0].outputChannels = devInfo[1].outputChannels = 2;
1974 devInfo[0].inputChannels = devInfo[1].inputChannels = 2;
1975 devInfo[0].duplexChannels = devInfo[1].duplexChannels = 2;
1976 devInfo[0].isDefaultOutput = devInfo[1].isDefaultOutput = true;
1977 devInfo[0].isDefaultInput = devInfo[1].isDefaultInput = true;
1978 devInfo[0].nativeFormats = devInfo[1].nativeFormats = RTAUDIO_FLOAT32;
1979 devInfo[0].name = "Auto-connect ON";
1980 devInfo[1].name = "Auto-connect OFF";
1981 }
1982
1983 if (device > 2)
1984 device = 2;
1985
1986 return devInfo[device];
1987 }
1988
jackBufferSizeHandler(jack_nframes_t nframes,void * infoPointer)1989 static int jackBufferSizeHandler( jack_nframes_t nframes, void *infoPointer )
1990 {
1991 CallbackInfo *info = (CallbackInfo *) infoPointer;
1992
1993 RtApiJack *object = (RtApiJack *) info->object;
1994 if ( object->bufferSizeEvent( (unsigned long) nframes ) == false ) return 1;
1995
1996 return 0;
1997 }
1998
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)1999 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2000 {
2001 CallbackInfo *info = (CallbackInfo *) infoPointer;
2002
2003 RtApiJack *object = (RtApiJack *) info->object;
2004 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2005
2006 return 0;
2007 }
2008
2009 // This function will be called by a spawned thread when the Jack
2010 // server signals that it is shutting down. It is necessary to handle
2011 // it this way because the jackShutdown() function must return before
2012 // the jackbridge_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2013 static void *jackCloseStream( void *ptr )
2014 {
2015 CallbackInfo *info = (CallbackInfo *) ptr;
2016 RtApiJack *object = (RtApiJack *) info->object;
2017
2018 object->closeStream();
2019
2020 pthread_exit( NULL );
2021
2022 return NULL;
2023 }
jackShutdown(void * infoPointer)2024 static void jackShutdown( void *infoPointer )
2025 {
2026 CallbackInfo *info = (CallbackInfo *) infoPointer;
2027 RtApiJack *object = (RtApiJack *) info->object;
2028
2029 // Check current stream state. If stopped, then we'll assume this
2030 // was called as a result of a call to RtApiJack::stopStream (the
2031 // deactivation of a client handle causes this function to be called).
2032 // If not, we'll assume the Jack server is shutting down or some
2033 // other problem occurred and we should close the stream.
2034 if ( object->isStreamRunning() == false ) return;
2035
2036 ThreadHandle threadId;
2037 pthread_create( &threadId, NULL, jackCloseStream, info );
2038 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2039 }
2040
jackXrun(void * infoPointer)2041 static int jackXrun( void *infoPointer )
2042 {
2043 JackHandle *handle = *((JackHandle **) infoPointer);
2044
2045 if ( handle->ports[0] ) handle->xrun[0] = true;
2046 if ( handle->ports[1] ) handle->xrun[1] = true;
2047
2048 return 0;
2049 }
2050
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // Open one direction of a JACK stream (or extend an existing output-only
  // stream to duplex): connect to the server, record format/conversion
  // state, allocate buffers and register the client's audio ports.
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    if ( options && !options->streamName.empty() )
      client = jackbridge_client_open( options->streamName.c_str(), jackoptions, status );
    else
      client = jackbridge_client_open( "Carla", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
      return FAILURE;
    }
  }
  else {
    // The handle must have been created on an earlier pass.
    client = handle->client;
  }

  // Check the jack server sample rate.
  stream_.sampleRate = jackbridge_get_sample_rate( client );

  // Get the latency of the JACK port.
  if (const char **ports = jackbridge_get_ports( client, "system:", NULL, JackPortIsInput )) {
    if ( ports[ firstChannel ] ) {
      // Added by Ge Wang
      jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
      // the range (usually the min and max are equal)
      jack_latency_range_t latrange; latrange.min = latrange.max = 0;
      // get the latency range
      jackbridge_port_get_latency_range( jackbridge_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
      // be optimistic, use the min!
      stream_.latency[mode] = latrange.min;
      //stream_.latency[mode] = jack_port_get_latency( jackbridge_port_by_name( client, ports[ firstChannel ] ) );
    }
    jackbridge_free( ports );
  }

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size.  The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jackbridge_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion: needed when the user format differs
  // from JACK's float32, or when interleaving differs for >1 channel.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream (first pass only).
  if ( handle == 0 ) {
    try {
      handle = new JackHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
      goto error;
    }

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  }

  // Allocate necessary internal buffers.  The factor of 8192 sizes the
  // buffer for the largest JACK period this implementation accepts
  // (callbackEvent() rejects nframes > 8192).
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * 8192 * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        // A device buffer already exists from the output pass; reuse it if
        // it is at least as large as what input conversion requires.
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= 8192;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
    goto error;
  }

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
  else {
    // First pass for this stream: install the JACK callbacks now.
    stream_.mode = mode;
    jackbridge_set_buffer_size_callback( handle->client, jackBufferSizeHandler, (void *) &stream_.callbackInfo );
    jackbridge_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jackbridge_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jackbridge_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
  }

  // Register our ports.
  char label[64];
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "audio-out%d", i+1 );
      handle->ports[0][i] = jackbridge_port_register( handle->client, (const char *)label,
                                                      JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    }
  }
  else {
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "audio-in%d", i+1 );
      handle->ports[1][i] = jackbridge_port_register( handle->client, (const char *)label,
                                                      JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
    }
  }

  // auto-connect-off "device" is at index 1; auto-connection is also
  // skipped when running under a session manager (LADISH or NSM).
  shouldAutoconnect_ = (device != 1 &&
                        std::getenv("LADISH_APP_NAME") == nullptr &&
                        std::getenv("NSM_URL") == nullptr);

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;

  return SUCCESS;

 error:
  // Unified failure path: release everything acquired so far.
  if ( handle ) {
    pthread_cond_destroy( &handle->condition );
    jackbridge_client_close( handle->client );

    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );

    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}
2257
closeStream(void)2258 void RtApiJack :: closeStream( void )
2259 {
2260 if ( stream_.state == STREAM_CLOSED ) {
2261 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2262 error( RtAudioError::WARNING );
2263 return;
2264 }
2265
2266 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2267 if ( handle ) {
2268
2269 if ( stream_.state == STREAM_RUNNING )
2270 jackbridge_deactivate( handle->client );
2271
2272 jackbridge_client_close( handle->client );
2273 }
2274
2275 if ( handle ) {
2276 if ( handle->ports[0] ) free( handle->ports[0] );
2277 if ( handle->ports[1] ) free( handle->ports[1] );
2278 pthread_cond_destroy( &handle->condition );
2279 delete handle;
2280 stream_.apiHandle = 0;
2281 }
2282
2283 for ( int i=0; i<2; i++ ) {
2284 if ( stream_.userBuffer[i] ) {
2285 free( stream_.userBuffer[i] );
2286 stream_.userBuffer[i] = 0;
2287 }
2288 }
2289
2290 if ( stream_.deviceBuffer ) {
2291 free( stream_.deviceBuffer );
2292 stream_.deviceBuffer = 0;
2293 }
2294
2295 stream_.mode = UNINITIALIZED;
2296 stream_.state = STREAM_CLOSED;
2297 }
2298
void RtApiJack :: startStream( void )
{
  // Activate the JACK client and, unless auto-connection is disabled,
  // wire our registered ports to the corresponding "system:" ports.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  bool result = jackbridge_activate( handle->client );
  if ( ! result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    result = false;
    // Our playback ports connect to the system's physical inputs.
    ports = jackbridge_get_ports( handle->client, "system:", NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = false;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jackbridge_connect( handle->client, jackbridge_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( ! result ) {
        jackbridge_free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    jackbridge_free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = false;
    // Our capture ports connect from the system's physical outputs.
    ports = jackbridge_get_ports( handle->client, "system:", NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = false;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jackbridge_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jackbridge_port_name( handle->ports[1][i] ) );
      if ( ! result ) {
        jackbridge_free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    jackbridge_free(ports);
  }

  // Reset drain state before marking the stream as running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2372
void RtApiJack :: stopStream( void )
{
  // Stop the stream, first letting any pending output drain: the process
  // callback increments drainCounter each cycle and signals the condition
  // when draining completes (see RtApiJack::callbackEvent).
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter != 0 means the callback already initiated the stop.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // caller of pthread_cond_wait; no lock is visible here — confirm
      // this is handled elsewhere or intended as-is.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jackbridge_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2394
abortStream(void)2395 void RtApiJack :: abortStream( void )
2396 {
2397 verifyStream();
2398 if ( stream_.state == STREAM_STOPPED ) {
2399 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2400 error( RtAudioError::WARNING );
2401 return;
2402 }
2403
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2405 handle->drainCounter = 2;
2406
2407 stopStream();
2408 }
2409
2410 // This function will be called by a spawned thread when the user
2411 // callback function signals that the stream should be stopped or
2412 // aborted. It is necessary to handle it this way because the
2413 // callbackEvent() function must return before the jackbridge_deactivate()
2414 // function will return.
jackStopStream(void * ptr)2415 static void *jackStopStream( void *ptr )
2416 {
2417 CallbackInfo *info = (CallbackInfo *) ptr;
2418 RtApiJack *object = (RtApiJack *) info->object;
2419
2420 object->stopStream();
2421 pthread_exit( NULL );
2422
2423 return NULL;
2424 }
2425
callbackEvent(unsigned long nframes)2426 bool RtApiJack :: callbackEvent( unsigned long nframes )
2427 {
2428 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2429 if ( stream_.state == STREAM_CLOSED ) {
2430 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2431 error( RtAudioError::WARNING );
2432 return FAILURE;
2433 }
2434 if ( nframes > 8192 ) {
2435 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size is too big ... cannot process!";
2436 error( RtAudioError::WARNING );
2437 return FAILURE;
2438 }
2439
2440 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2441 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2442
2443 // Check if we were draining the stream and signal is finished.
2444 if ( handle->drainCounter > 3 ) {
2445 ThreadHandle threadId;
2446
2447 stream_.state = STREAM_STOPPING;
2448 if ( handle->internalDrain == true )
2449 pthread_create( &threadId, NULL, jackStopStream, info );
2450 else
2451 pthread_cond_signal( &handle->condition );
2452 return SUCCESS;
2453 }
2454
2455 // Invoke user callback first, to get fresh output data.
2456 if ( handle->drainCounter == 0 ) {
2457 RtAudioCallback callback = (RtAudioCallback) info->callback;
2458 double streamTime = getStreamTime();
2459 RtAudioStreamStatus status = 0;
2460 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2461 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2462 handle->xrun[0] = false;
2463 }
2464 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2465 status |= RTAUDIO_INPUT_OVERFLOW;
2466 handle->xrun[1] = false;
2467 }
2468 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2469 nframes, streamTime, status, info->userData );
2470 if ( cbReturnValue == 2 ) {
2471 stream_.state = STREAM_STOPPING;
2472 handle->drainCounter = 2;
2473 ThreadHandle id;
2474 pthread_create( &id, NULL, jackStopStream, info );
2475 return SUCCESS;
2476 }
2477 else if ( cbReturnValue == 1 ) {
2478 handle->drainCounter = 1;
2479 handle->internalDrain = true;
2480 }
2481 }
2482
2483 jack_default_audio_sample_t *jackbuffer;
2484 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2486
2487 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2488
2489 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2490 jackbuffer = (jack_default_audio_sample_t *) jackbridge_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2491 memset( jackbuffer, 0, bufferBytes );
2492 }
2493
2494 }
2495 else if ( stream_.doConvertBuffer[0] ) {
2496
2497 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2498
2499 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2500 jackbuffer = (jack_default_audio_sample_t *) jackbridge_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2501 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2502 }
2503 }
2504 else { // no buffer conversion
2505 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2506 jackbuffer = (jack_default_audio_sample_t *) jackbridge_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2507 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2508 }
2509 }
2510 }
2511
2512 // Don't bother draining input
2513 if ( handle->drainCounter ) {
2514 handle->drainCounter++;
2515 goto unlock;
2516 }
2517
2518 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2519
2520 if ( stream_.doConvertBuffer[1] ) {
2521 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2522 jackbuffer = (jack_default_audio_sample_t *) jackbridge_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2523 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2524 }
2525 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2526 }
2527 else { // no buffer conversion
2528 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2529 jackbuffer = (jack_default_audio_sample_t *) jackbridge_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2530 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2531 }
2532 }
2533 }
2534
2535 unlock:
2536 RtApi::tickStreamTime();
2537 return SUCCESS;
2538 }
2539
bufferSizeEvent(unsigned long nframes)2540 bool RtApiJack :: bufferSizeEvent( unsigned long nframes )
2541 {
2542 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2543 if ( stream_.state == STREAM_CLOSED ) {
2544 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2545 error( RtAudioError::WARNING );
2546 return FAILURE;
2547 }
2548 if ( nframes > 8192 ) {
2549 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size is too big ... cannot process!";
2550 error( RtAudioError::WARNING );
2551 return FAILURE;
2552 }
2553
2554 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2555
2556 RtAudioBufferSizeCallback callback = (RtAudioBufferSizeCallback) info->bufSizeCallback;
2557 return callback( nframes, info->userData );
2558 }
2559 //******************** End of __UNIX_JACK__ *********************//
2560 #endif
2561
2562 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2563
2564 // The ASIO API is designed around a callback scheme, so this
2565 // implementation is similar to that used for OS-X CoreAudio and Linux
2566 // Jack. The primary constraint with ASIO is that it only allows
2567 // access to a single driver at a time. Thus, it is not possible to
2568 // have more than one simultaneous RtAudio stream.
2569 //
2570 // This implementation also requires a number of external ASIO files
2571 // and a few global variables. The ASIO callback scheme does not
2572 // allow for the passing of user data, so we must create a global
2573 // pointer to our callbackInfo structure.
2574 //
2575 // On unix systems, we make use of a pthread condition variable.
2576 // Since there is no equivalent in Windows, I hacked something based
2577 // on information found in
2578 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2579
2580 #include "asio.cpp"
2581 #include "asiodrivers.cpp"
2582 #include "asiolist.cpp"
2583 #include "iasiothiscallresolver.cpp"
2584 #include <cmath>
2585
2586 static AsioDrivers drivers;
2587 static ASIOCallbacks asioCallbacks;
2588 static ASIODriverInfo driverInfo;
2589 static CallbackInfo *asioCallbackInfo;
2590 static bool asioXRun;
2591
// Per-stream bookkeeping for the ASIO backend, stored in stream_.apiHandle.
struct AsioHandle {
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // malloc'ed array, one entry per device channel (output then input)
  HANDLE condition;       // manual-reset event used to signal stopStream(); created in probeDeviceOpen()
                          // NOTE(review): not initialized here — only valid after probeDeviceOpen() runs.

  AsioHandle()
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
2601
2602 // Function declarations (definitions at end of section)
2603 static const char* getAsioErrorString( ASIOError result );
2604 static void sampleRateChanged( ASIOSampleRate sRate );
2605 static long asioMessages( long selector, long value, void* message, double* opt );
2606
RtApiAsio()2607 RtApiAsio :: RtApiAsio()
2608 {
2609 // ASIO cannot run on a multi-threaded appartment. You can call
2610 // CoInitialize beforehand, but it must be for appartment threading
2611 // (in which case, CoInitilialize will return S_FALSE here).
2612 coInitialized_ = false;
2613 HRESULT hr = CoInitialize( NULL );
2614 if ( FAILED(hr) ) {
2615 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2616 error( RtAudioError::WARNING );
2617 }
2618 coInitialized_ = true;
2619
2620 drivers.removeCurrentDriver();
2621 driverInfo.asioVersion = 2;
2622
2623 // See note in DirectSound implementation about GetDesktopWindow().
2624 driverInfo.sysRef = GetForegroundWindow();
2625 }
2626
~RtApiAsio()2627 RtApiAsio :: ~RtApiAsio()
2628 {
2629 if ( stream_.state != STREAM_CLOSED ) closeStream();
2630 if ( coInitialized_ ) CoUninitialize();
2631 }
2632
getDeviceCount(void)2633 unsigned int RtApiAsio :: getDeviceCount( void )
2634 {
2635 return (unsigned int) drivers.asioGetNumDev();
2636 }
2637
getDeviceInfo(unsigned int device)2638 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2639 {
2640 RtAudio::DeviceInfo info;
2641 info.probed = false;
2642
2643 // Get device ID
2644 unsigned int nDevices = getDeviceCount();
2645 if ( nDevices == 0 ) {
2646 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2647 error( RtAudioError::INVALID_USE );
2648 return info;
2649 }
2650
2651 if ( device >= nDevices ) {
2652 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2653 error( RtAudioError::INVALID_USE );
2654 return info;
2655 }
2656
2657 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2658 if ( stream_.state != STREAM_CLOSED ) {
2659 if ( device >= devices_.size() ) {
2660 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2661 error( RtAudioError::WARNING );
2662 return info;
2663 }
2664 return devices_[ device ];
2665 }
2666
2667 char driverName[32];
2668 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2669 if ( result != ASE_OK ) {
2670 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2671 errorText_ = errorStream_.str();
2672 error( RtAudioError::WARNING );
2673 return info;
2674 }
2675
2676 info.name = driverName;
2677
2678 if ( !drivers.loadDriver( driverName ) ) {
2679 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2680 errorText_ = errorStream_.str();
2681 error( RtAudioError::WARNING );
2682 return info;
2683 }
2684
2685 result = ASIOInit( &driverInfo );
2686 if ( result != ASE_OK ) {
2687 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2688 errorText_ = errorStream_.str();
2689 error( RtAudioError::WARNING );
2690 return info;
2691 }
2692
2693 // Determine the device channel information.
2694 long inputChannels, outputChannels;
2695 result = ASIOGetChannels( &inputChannels, &outputChannels );
2696 if ( result != ASE_OK ) {
2697 drivers.removeCurrentDriver();
2698 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2699 errorText_ = errorStream_.str();
2700 error( RtAudioError::WARNING );
2701 return info;
2702 }
2703
2704 info.outputChannels = outputChannels;
2705 info.inputChannels = inputChannels;
2706 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2707 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2708
2709 // Determine the supported sample rates.
2710 info.sampleRates.clear();
2711 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2712 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2713 if ( result == ASE_OK ) {
2714 info.sampleRates.push_back( SAMPLE_RATES[i] );
2715
2716 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2717 info.preferredSampleRate = SAMPLE_RATES[i];
2718 }
2719 }
2720
2721 // Determine supported data types ... just check first channel and assume rest are the same.
2722 ASIOChannelInfo channelInfo;
2723 channelInfo.channel = 0;
2724 channelInfo.isInput = true;
2725 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2726 result = ASIOGetChannelInfo( &channelInfo );
2727 if ( result != ASE_OK ) {
2728 drivers.removeCurrentDriver();
2729 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2730 errorText_ = errorStream_.str();
2731 error( RtAudioError::WARNING );
2732 return info;
2733 }
2734
2735 info.nativeFormats = 0;
2736 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2737 info.nativeFormats |= RTAUDIO_SINT16;
2738 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2739 info.nativeFormats |= RTAUDIO_SINT32;
2740 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2741 info.nativeFormats |= RTAUDIO_FLOAT32;
2742 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2743 info.nativeFormats |= RTAUDIO_FLOAT64;
2744 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2745 info.nativeFormats |= RTAUDIO_SINT24;
2746
2747 if ( info.outputChannels > 0 )
2748 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2749 if ( info.inputChannels > 0 )
2750 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2751
2752 info.probed = true;
2753 drivers.removeCurrentDriver();
2754 return info;
2755 }
2756
bufferSwitch(long index,ASIOBool)2757 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2758 {
2759 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2760 object->callbackEvent( index );
2761 }
2762
saveDeviceInfo(void)2763 void RtApiAsio :: saveDeviceInfo( void )
2764 {
2765 devices_.clear();
2766
2767 unsigned int nDevices = getDeviceCount();
2768 devices_.resize( nDevices );
2769 for ( unsigned int i=0; i<nDevices; i++ )
2770 devices_[i] = getDeviceInfo( i );
2771 }
2772
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2773 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2774 unsigned int firstChannel, unsigned int sampleRate,
2775 RtAudioFormat format, unsigned int *bufferSize,
2776 RtAudio::StreamOptions *options )
2777 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2778
2779 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2780
2781 // For ASIO, a duplex stream MUST use the same driver.
2782 if ( isDuplexInput && stream_.device[0] != device ) {
2783 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2784 return FAILURE;
2785 }
2786
2787 char driverName[32];
2788 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2789 if ( result != ASE_OK ) {
2790 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2791 errorText_ = errorStream_.str();
2792 return FAILURE;
2793 }
2794
2795 // Only load the driver once for duplex stream.
2796 if ( !isDuplexInput ) {
2797 // The getDeviceInfo() function will not work when a stream is open
2798 // because ASIO does not allow multiple devices to run at the same
2799 // time. Thus, we'll probe the system before opening a stream and
2800 // save the results for use by getDeviceInfo().
2801 this->saveDeviceInfo();
2802
2803 if ( !drivers.loadDriver( driverName ) ) {
2804 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2805 errorText_ = errorStream_.str();
2806 return FAILURE;
2807 }
2808
2809 result = ASIOInit( &driverInfo );
2810 if ( result != ASE_OK ) {
2811 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2812 errorText_ = errorStream_.str();
2813 return FAILURE;
2814 }
2815 }
2816
2817 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2818 bool buffersAllocated = false;
2819 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2820 unsigned int nChannels;
2821
2822
2823 // Check the device channel count.
2824 long inputChannels, outputChannels;
2825 result = ASIOGetChannels( &inputChannels, &outputChannels );
2826 if ( result != ASE_OK ) {
2827 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2828 errorText_ = errorStream_.str();
2829 goto error;
2830 }
2831
2832 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2833 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2834 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2835 errorText_ = errorStream_.str();
2836 goto error;
2837 }
2838 stream_.nDeviceChannels[mode] = channels;
2839 stream_.nUserChannels[mode] = channels;
2840 stream_.channelOffset[mode] = firstChannel;
2841
2842 // Verify the sample rate is supported.
2843 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2844 if ( result != ASE_OK ) {
2845 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2846 errorText_ = errorStream_.str();
2847 goto error;
2848 }
2849
2850 // Get the current sample rate
2851 ASIOSampleRate currentRate;
2852 result = ASIOGetSampleRate( ¤tRate );
2853 if ( result != ASE_OK ) {
2854 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2855 errorText_ = errorStream_.str();
2856 goto error;
2857 }
2858
2859 // Set the sample rate only if necessary
2860 if ( currentRate != sampleRate ) {
2861 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2862 if ( result != ASE_OK ) {
2863 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2864 errorText_ = errorStream_.str();
2865 goto error;
2866 }
2867 }
2868
2869 // Determine the driver data type.
2870 ASIOChannelInfo channelInfo;
2871 channelInfo.channel = 0;
2872 if ( mode == OUTPUT ) channelInfo.isInput = false;
2873 else channelInfo.isInput = true;
2874 result = ASIOGetChannelInfo( &channelInfo );
2875 if ( result != ASE_OK ) {
2876 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2877 errorText_ = errorStream_.str();
2878 goto error;
2879 }
2880
2881 // Assuming WINDOWS host is always little-endian.
2882 stream_.doByteSwap[mode] = false;
2883 stream_.userFormat = format;
2884 stream_.deviceFormat[mode] = 0;
2885 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2886 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2887 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2888 }
2889 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2890 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2891 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
2892 }
2893 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
2894 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2895 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
2896 }
2897 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
2898 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
2899 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
2900 }
2901 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
2902 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
2903 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
2904 }
2905
2906 if ( stream_.deviceFormat[mode] == 0 ) {
2907 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
2908 errorText_ = errorStream_.str();
2909 goto error;
2910 }
2911
2912 // Set the buffer size. For a duplex stream, this will end up
2913 // setting the buffer size based on the input constraints, which
2914 // should be ok.
2915 long minSize, maxSize, preferSize, granularity;
2916 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
2917 if ( result != ASE_OK ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
2919 errorText_ = errorStream_.str();
2920 goto error;
2921 }
2922
2923 if ( isDuplexInput ) {
2924 // When this is the duplex input (output was opened before), then we have to use the same
2925 // buffersize as the output, because it might use the preferred buffer size, which most
2926 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
2927 // So instead of throwing an error, make them equal. The caller uses the reference
2928 // to the "bufferSize" param as usual to set up processing buffers.
2929
2930 *bufferSize = stream_.bufferSize;
2931
2932 } else {
2933 if ( *bufferSize == 0 ) *bufferSize = preferSize;
2934 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2935 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2936 else if ( granularity == -1 ) {
2937 // Make sure bufferSize is a power of two.
2938 int log2_of_min_size = 0;
2939 int log2_of_max_size = 0;
2940
2941 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
2942 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
2943 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
2944 }
2945
2946 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
2947 int min_delta_num = log2_of_min_size;
2948
2949 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
2950 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
2951 if (current_delta < min_delta) {
2952 min_delta = current_delta;
2953 min_delta_num = i;
2954 }
2955 }
2956
2957 *bufferSize = ( (unsigned int)1 << min_delta_num );
2958 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2959 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2960 }
2961 else if ( granularity != 0 ) {
2962 // Set to an even multiple of granularity, rounding up.
2963 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
2964 }
2965 }
2966
2967 /*
2968 // we don't use it anymore, see above!
2969 // Just left it here for the case...
2970 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
2971 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
2972 goto error;
2973 }
2974 */
2975
2976 stream_.bufferSize = *bufferSize;
2977 stream_.nBuffers = 2;
2978
2979 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2980 else stream_.userInterleaved = true;
2981
2982 // ASIO always uses non-interleaved buffers.
2983 stream_.deviceInterleaved[mode] = false;
2984
2985 // Allocate, if necessary, our AsioHandle structure for the stream.
2986 if ( handle == 0 ) {
2987 try {
2988 handle = new AsioHandle;
2989 }
2990 catch ( std::bad_alloc& ) {
2991 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
2992 goto error;
2993 }
2994 handle->bufferInfos = 0;
2995
2996 // Create a manual-reset event.
2997 handle->condition = CreateEvent( NULL, // no security
2998 TRUE, // manual-reset
2999 FALSE, // non-signaled initially
3000 NULL ); // unnamed
3001 stream_.apiHandle = (void *) handle;
3002 }
3003
3004 // Create the ASIO internal buffers. Since RtAudio sets up input
3005 // and output separately, we'll have to dispose of previously
3006 // created output buffers for a duplex stream.
3007 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3008 ASIODisposeBuffers();
3009 if ( handle->bufferInfos ) free( handle->bufferInfos );
3010 }
3011
3012 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3013 unsigned int i;
3014 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3015 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3016 if ( handle->bufferInfos == NULL ) {
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3018 errorText_ = errorStream_.str();
3019 goto error;
3020 }
3021
3022 ASIOBufferInfo *infos;
3023 infos = handle->bufferInfos;
3024 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3025 infos->isInput = ASIOFalse;
3026 infos->channelNum = i + stream_.channelOffset[0];
3027 infos->buffers[0] = infos->buffers[1] = 0;
3028 }
3029 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3030 infos->isInput = ASIOTrue;
3031 infos->channelNum = i + stream_.channelOffset[1];
3032 infos->buffers[0] = infos->buffers[1] = 0;
3033 }
3034
3035 // prepare for callbacks
3036 stream_.sampleRate = sampleRate;
3037 stream_.device[mode] = device;
3038 stream_.mode = isDuplexInput ? DUPLEX : mode;
3039
3040 // store this class instance before registering callbacks, that are going to use it
3041 asioCallbackInfo = &stream_.callbackInfo;
3042 stream_.callbackInfo.object = (void *) this;
3043
3044 // Set up the ASIO callback structure and create the ASIO data buffers.
3045 asioCallbacks.bufferSwitch = &bufferSwitch;
3046 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3047 asioCallbacks.asioMessage = &asioMessages;
3048 asioCallbacks.bufferSwitchTimeInfo = NULL;
3049 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3050 if ( result != ASE_OK ) {
3051 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3052 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3053 // in that case, let's be naïve and try that instead
3054 *bufferSize = preferSize;
3055 stream_.bufferSize = *bufferSize;
3056 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3057 }
3058
3059 if ( result != ASE_OK ) {
3060 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3061 errorText_ = errorStream_.str();
3062 goto error;
3063 }
3064 buffersAllocated = true;
3065 stream_.state = STREAM_STOPPED;
3066
3067 // Set flags for buffer conversion.
3068 stream_.doConvertBuffer[mode] = false;
3069 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3070 stream_.doConvertBuffer[mode] = true;
3071 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3072 stream_.nUserChannels[mode] > 1 )
3073 stream_.doConvertBuffer[mode] = true;
3074
3075 // Allocate necessary internal buffers
3076 unsigned long bufferBytes;
3077 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3078 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3079 if ( stream_.userBuffer[mode] == NULL ) {
3080 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3081 goto error;
3082 }
3083
3084 if ( stream_.doConvertBuffer[mode] ) {
3085
3086 bool makeBuffer = true;
3087 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3088 if ( isDuplexInput && stream_.deviceBuffer ) {
3089 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3090 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3091 }
3092
3093 if ( makeBuffer ) {
3094 bufferBytes *= *bufferSize;
3095 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3096 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3097 if ( stream_.deviceBuffer == NULL ) {
3098 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3099 goto error;
3100 }
3101 }
3102 }
3103
3104 // Determine device latencies
3105 long inputLatency, outputLatency;
3106 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3107 if ( result != ASE_OK ) {
3108 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3109 errorText_ = errorStream_.str();
3110 error( RtAudioError::WARNING); // warn but don't fail
3111 }
3112 else {
3113 stream_.latency[0] = outputLatency;
3114 stream_.latency[1] = inputLatency;
3115 }
3116
3117 // Setup the buffer conversion information structure. We don't use
3118 // buffers to do channel offsets, so we override that parameter
3119 // here.
3120 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3121
3122 return SUCCESS;
3123
3124 error:
3125 if ( !isDuplexInput ) {
3126 // the cleanup for error in the duplex input, is done by RtApi::openStream
3127 // So we clean up for single channel only
3128
3129 if ( buffersAllocated )
3130 ASIODisposeBuffers();
3131
3132 drivers.removeCurrentDriver();
3133
3134 if ( handle ) {
3135 CloseHandle( handle->condition );
3136 if ( handle->bufferInfos )
3137 free( handle->bufferInfos );
3138
3139 delete handle;
3140 stream_.apiHandle = 0;
3141 }
3142
3143
3144 if ( stream_.userBuffer[mode] ) {
3145 free( stream_.userBuffer[mode] );
3146 stream_.userBuffer[mode] = 0;
3147 }
3148
3149 if ( stream_.deviceBuffer ) {
3150 free( stream_.deviceBuffer );
3151 stream_.deviceBuffer = 0;
3152 }
3153 }
3154
3155 return FAILURE;
3156 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3157
closeStream()3158 void RtApiAsio :: closeStream()
3159 {
3160 if ( stream_.state == STREAM_CLOSED ) {
3161 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3162 error( RtAudioError::WARNING );
3163 return;
3164 }
3165
3166 if ( stream_.state == STREAM_RUNNING ) {
3167 stream_.state = STREAM_STOPPED;
3168 ASIOStop();
3169 }
3170 ASIODisposeBuffers();
3171 drivers.removeCurrentDriver();
3172
3173 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3174 if ( handle ) {
3175 CloseHandle( handle->condition );
3176 if ( handle->bufferInfos )
3177 free( handle->bufferInfos );
3178 delete handle;
3179 stream_.apiHandle = 0;
3180 }
3181
3182 for ( int i=0; i<2; i++ ) {
3183 if ( stream_.userBuffer[i] ) {
3184 free( stream_.userBuffer[i] );
3185 stream_.userBuffer[i] = 0;
3186 }
3187 }
3188
3189 if ( stream_.deviceBuffer ) {
3190 free( stream_.deviceBuffer );
3191 stream_.deviceBuffer = 0;
3192 }
3193
3194 stream_.mode = UNINITIALIZED;
3195 stream_.state = STREAM_CLOSED;
3196 }
3197
// Cleared by startStream(); NOTE(review): presumably set when an
// asioStopStream() thread has been spawned, but the setting site is
// outside this section — confirm before relying on it.
bool stopThreadCalled = false;
3199
startStream()3200 void RtApiAsio :: startStream()
3201 {
3202 verifyStream();
3203 if ( stream_.state == STREAM_RUNNING ) {
3204 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3205 error( RtAudioError::WARNING );
3206 return;
3207 }
3208
3209 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3210 ASIOError result = ASIOStart();
3211 if ( result != ASE_OK ) {
3212 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3213 errorText_ = errorStream_.str();
3214 goto unlock;
3215 }
3216
3217 handle->drainCounter = 0;
3218 handle->internalDrain = false;
3219 ResetEvent( handle->condition );
3220 stream_.state = STREAM_RUNNING;
3221 asioXRun = false;
3222
3223 unlock:
3224 stopThreadCalled = false;
3225
3226 if ( result == ASE_OK ) return;
3227 error( RtAudioError::SYSTEM_ERROR );
3228 }
3229
stopStream()3230 void RtApiAsio :: stopStream()
3231 {
3232 verifyStream();
3233 if ( stream_.state == STREAM_STOPPED ) {
3234 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3235 error( RtAudioError::WARNING );
3236 return;
3237 }
3238
3239 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3240 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3241 if ( handle->drainCounter == 0 ) {
3242 handle->drainCounter = 2;
3243 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3244 }
3245 }
3246
3247 stream_.state = STREAM_STOPPED;
3248
3249 ASIOError result = ASIOStop();
3250 if ( result != ASE_OK ) {
3251 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3252 errorText_ = errorStream_.str();
3253 }
3254
3255 if ( result == ASE_OK ) return;
3256 error( RtAudioError::SYSTEM_ERROR );
3257 }
3258
abortStream()3259 void RtApiAsio :: abortStream()
3260 {
3261 verifyStream();
3262 if ( stream_.state == STREAM_STOPPED ) {
3263 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3264 error( RtAudioError::WARNING );
3265 return;
3266 }
3267
3268 // The following lines were commented-out because some behavior was
3269 // noted where the device buffers need to be zeroed to avoid
3270 // continuing sound, even when the device buffers are completely
3271 // disposed. So now, calling abort is the same as calling stop.
3272 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3273 // handle->drainCounter = 2;
3274 stopStream();
3275 }
3276
3277 // This function will be called by a spawned thread when the user
3278 // callback function signals that the stream should be stopped or
3279 // aborted. It is necessary to handle it this way because the
3280 // callbackEvent() function must return before the ASIOStop()
3281 // function will return.
asioStopStream(void * ptr)3282 static unsigned __stdcall asioStopStream( void *ptr )
3283 {
3284 CallbackInfo *info = (CallbackInfo *) ptr;
3285 RtApiAsio *object = (RtApiAsio *) info->object;
3286
3287 object->stopStream();
3288 _endthreadex( 0 );
3289 return 0;
3290 }
3291
callbackEvent(long bufferIndex)3292 bool RtApiAsio :: callbackEvent( long bufferIndex )
3293 {
3294 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3295 if ( stream_.state == STREAM_CLOSED ) {
3296 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3297 error( RtAudioError::WARNING );
3298 return FAILURE;
3299 }
3300
3301 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3302 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3303
3304 // Check if we were draining the stream and signal if finished.
3305 if ( handle->drainCounter > 3 ) {
3306
3307 stream_.state = STREAM_STOPPING;
3308 if ( handle->internalDrain == false )
3309 SetEvent( handle->condition );
3310 else { // spawn a thread to stop the stream
3311 unsigned threadId;
3312 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3313 &stream_.callbackInfo, 0, &threadId );
3314 }
3315 return SUCCESS;
3316 }
3317
3318 // Invoke user callback to get fresh output data UNLESS we are
3319 // draining stream.
3320 if ( handle->drainCounter == 0 ) {
3321 RtAudioCallback callback = (RtAudioCallback) info->callback;
3322 double streamTime = getStreamTime();
3323 RtAudioStreamStatus status = 0;
3324 if ( stream_.mode != INPUT && asioXRun == true ) {
3325 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3326 asioXRun = false;
3327 }
3328 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3329 status |= RTAUDIO_INPUT_OVERFLOW;
3330 asioXRun = false;
3331 }
3332 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3333 stream_.bufferSize, streamTime, status, info->userData );
3334 if ( cbReturnValue == 2 ) {
3335 stream_.state = STREAM_STOPPING;
3336 handle->drainCounter = 2;
3337 unsigned threadId;
3338 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3339 &stream_.callbackInfo, 0, &threadId );
3340 return SUCCESS;
3341 }
3342 else if ( cbReturnValue == 1 ) {
3343 handle->drainCounter = 1;
3344 handle->internalDrain = true;
3345 }
3346 }
3347
3348 unsigned int nChannels, bufferBytes, i, j;
3349 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3350 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3351
3352 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3353
3354 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3355
3356 for ( i=0, j=0; i<nChannels; i++ ) {
3357 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3358 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3359 }
3360
3361 }
3362 else if ( stream_.doConvertBuffer[0] ) {
3363
3364 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3365 if ( stream_.doByteSwap[0] )
3366 byteSwapBuffer( stream_.deviceBuffer,
3367 stream_.bufferSize * stream_.nDeviceChannels[0],
3368 stream_.deviceFormat[0] );
3369
3370 for ( i=0, j=0; i<nChannels; i++ ) {
3371 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3372 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3373 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3374 }
3375
3376 }
3377 else {
3378
3379 if ( stream_.doByteSwap[0] )
3380 byteSwapBuffer( stream_.userBuffer[0],
3381 stream_.bufferSize * stream_.nUserChannels[0],
3382 stream_.userFormat );
3383
3384 for ( i=0, j=0; i<nChannels; i++ ) {
3385 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3386 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3387 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3388 }
3389
3390 }
3391 }
3392
3393 // Don't bother draining input
3394 if ( handle->drainCounter ) {
3395 handle->drainCounter++;
3396 goto unlock;
3397 }
3398
3399 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3400
3401 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3402
3403 if (stream_.doConvertBuffer[1]) {
3404
3405 // Always interleave ASIO input data.
3406 for ( i=0, j=0; i<nChannels; i++ ) {
3407 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3408 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3409 handle->bufferInfos[i].buffers[bufferIndex],
3410 bufferBytes );
3411 }
3412
3413 if ( stream_.doByteSwap[1] )
3414 byteSwapBuffer( stream_.deviceBuffer,
3415 stream_.bufferSize * stream_.nDeviceChannels[1],
3416 stream_.deviceFormat[1] );
3417 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3418
3419 }
3420 else {
3421 for ( i=0, j=0; i<nChannels; i++ ) {
3422 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3423 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3424 handle->bufferInfos[i].buffers[bufferIndex],
3425 bufferBytes );
3426 }
3427 }
3428
3429 if ( stream_.doByteSwap[1] )
3430 byteSwapBuffer( stream_.userBuffer[1],
3431 stream_.bufferSize * stream_.nUserChannels[1],
3432 stream_.userFormat );
3433 }
3434 }
3435
3436 unlock:
3437 // The following call was suggested by Malte Clasen. While the API
3438 // documentation indicates it should not be required, some device
3439 // drivers apparently do not function correctly without it.
3440 ASIOOutputReady();
3441
3442 RtApi::tickStreamTime();
3443 return SUCCESS;
3444 }
3445
sampleRateChanged(ASIOSampleRate sRate)3446 static void sampleRateChanged( ASIOSampleRate sRate )
3447 {
3448 // The ASIO documentation says that this usually only happens during
3449 // external sync. Audio processing is not stopped by the driver,
3450 // actual sample rate might not have even changed, maybe only the
3451 // sample rate status of an AES/EBU or S/PDIF digital input at the
3452 // audio device.
3453
3454 RtApi *object = (RtApi *) asioCallbackInfo->object;
3455 try {
3456 object->stopStream();
3457 }
3458 catch ( RtAudioError &exception ) {
3459 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3460 return;
3461 }
3462
3463 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3464 }
3465
asioMessages(long selector,long value,void *,double *)3466 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3467 {
3468 long ret = 0;
3469
3470 switch( selector ) {
3471 case kAsioSelectorSupported:
3472 if ( value == kAsioResetRequest
3473 || value == kAsioEngineVersion
3474 || value == kAsioResyncRequest
3475 || value == kAsioLatenciesChanged
3476 // The following three were added for ASIO 2.0, you don't
3477 // necessarily have to support them.
3478 || value == kAsioSupportsTimeInfo
3479 || value == kAsioSupportsTimeCode
3480 || value == kAsioSupportsInputMonitor)
3481 ret = 1L;
3482 break;
3483 case kAsioResetRequest:
3484 // Defer the task and perform the reset of the driver during the
3485 // next "safe" situation. You cannot reset the driver right now,
3486 // as this code is called from the driver. Reset the driver is
3487 // done by completely destruct is. I.e. ASIOStop(),
3488 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3489 // driver again.
3490 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3491 ret = 1L;
3492 break;
3493 case kAsioResyncRequest:
3494 // This informs the application that the driver encountered some
3495 // non-fatal data loss. It is used for synchronization purposes
3496 // of different media. Added mainly to work around the Win16Mutex
3497 // problems in Windows 95/98 with the Windows Multimedia system,
3498 // which could lose data because the Mutex was held too long by
3499 // another thread. However a driver can issue it in other
3500 // situations, too.
3501 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3502 asioXRun = true;
3503 ret = 1L;
3504 break;
3505 case kAsioLatenciesChanged:
3506 // This will inform the host application that the drivers were
3507 // latencies changed. Beware, it this does not mean that the
3508 // buffer sizes have changed! You might need to update internal
3509 // delay data.
3510 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3511 ret = 1L;
3512 break;
3513 case kAsioEngineVersion:
3514 // Return the supported ASIO version of the host application. If
3515 // a host application does not implement this selector, ASIO 1.0
3516 // is assumed by the driver.
3517 ret = 2L;
3518 break;
3519 case kAsioSupportsTimeInfo:
3520 // Informs the driver whether the
3521 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3522 // For compatibility with ASIO 1.0 drivers the host application
3523 // should always support the "old" bufferSwitch method, too.
3524 ret = 0;
3525 break;
3526 case kAsioSupportsTimeCode:
3527 // Informs the driver whether application is interested in time
3528 // code info. If an application does not need to know about time
3529 // code, the driver has less work to do.
3530 ret = 0;
3531 break;
3532 }
3533 return ret;
3534 }
3535
getAsioErrorString(ASIOError result)3536 static const char* getAsioErrorString( ASIOError result )
3537 {
3538 struct Messages
3539 {
3540 ASIOError value;
3541 const char*message;
3542 };
3543
3544 static const Messages m[] =
3545 {
3546 { ASE_NotPresent, "Hardware input or output is not present or available." },
3547 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3548 { ASE_InvalidParameter, "Invalid input parameter." },
3549 { ASE_InvalidMode, "Invalid mode." },
3550 { ASE_SPNotAdvancing, "Sample position not advancing." },
3551 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3552 { ASE_NoMemory, "Not enough memory to complete the request." }
3553 };
3554
3555 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3556 if ( m[i].value == result ) return m[i].message;
3557
3558 return "Unknown error.";
3559 }
3560
3561 //******************** End of __WINDOWS_ASIO__ *********************//
3562 #endif
3563
3564
3565 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3566
3567 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3568 // - Introduces support for the Windows WASAPI API
3569 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3570 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3571 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3572
3573 #ifndef INITGUID
3574 #define INITGUID
3575 #endif
3576 #include <audioclient.h>
3577 #include <avrt.h>
3578 #include <mmdeviceapi.h>
3579 #include <functiondiscoverykeys_devpkey.h>
3580 #include <sstream>
3581
3582 //=============================================================================
3583
// Release a COM interface pointer (if non-NULL) and reset it to NULL so a
// subsequent SAFE_RELEASE on the same pointer is a harmless no-op.
// NOTE: deliberately NOT wrapped in do { ... } while(0) — this file invokes
// the macro without a trailing semicolon in places (see closeStream()).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
objectPtr->Release();\
objectPtr = NULL;\
}
3590
3591 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3592
3593 //-----------------------------------------------------------------------------
3594
3595 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3596 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3597 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3598 // provide intermediate storage for read / write synchronization.
3599 class WasapiBuffer
3600 {
3601 public:
WasapiBuffer()3602 WasapiBuffer()
3603 : buffer_( NULL ),
3604 bufferSize_( 0 ),
3605 inIndex_( 0 ),
3606 outIndex_( 0 ) {}
3607
~WasapiBuffer()3608 ~WasapiBuffer() {
3609 free( buffer_ );
3610 }
3611
3612 // sets the length of the internal ring buffer
setBufferSize(unsigned int bufferSize,unsigned int formatBytes)3613 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3614 free( buffer_ );
3615
3616 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3617
3618 bufferSize_ = bufferSize;
3619 inIndex_ = 0;
3620 outIndex_ = 0;
3621 }
3622
3623 // attempt to push a buffer into the ring buffer at the current "in" index
pushBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3624 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3625 {
3626 if ( !buffer || // incoming buffer is NULL
3627 bufferSize == 0 || // incoming buffer has no data
3628 bufferSize > bufferSize_ ) // incoming buffer too large
3629 {
3630 return false;
3631 }
3632
3633 unsigned int relOutIndex = outIndex_;
3634 unsigned int inIndexEnd = inIndex_ + bufferSize;
3635 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3636 relOutIndex += bufferSize_;
3637 }
3638
3639 // "in" index can end on the "out" index but cannot begin at it
3640 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3641 return false; // not enough space between "in" index and "out" index
3642 }
3643
3644 // copy buffer from external to internal
3645 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3646 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3647 int fromInSize = bufferSize - fromZeroSize;
3648
3649 switch( format )
3650 {
3651 case RTAUDIO_SINT8:
3652 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3653 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3654 break;
3655 case RTAUDIO_SINT16:
3656 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3657 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3658 break;
3659 case RTAUDIO_SINT24:
3660 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3661 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3662 break;
3663 case RTAUDIO_SINT32:
3664 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3665 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3666 break;
3667 case RTAUDIO_FLOAT32:
3668 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3669 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3670 break;
3671 case RTAUDIO_FLOAT64:
3672 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3673 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3674 break;
3675 }
3676
3677 // update "in" index
3678 inIndex_ += bufferSize;
3679 inIndex_ %= bufferSize_;
3680
3681 return true;
3682 }
3683
3684 // attempt to pull a buffer from the ring buffer from the current "out" index
pullBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3685 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3686 {
3687 if ( !buffer || // incoming buffer is NULL
3688 bufferSize == 0 || // incoming buffer has no data
3689 bufferSize > bufferSize_ ) // incoming buffer too large
3690 {
3691 return false;
3692 }
3693
3694 unsigned int relInIndex = inIndex_;
3695 unsigned int outIndexEnd = outIndex_ + bufferSize;
3696 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3697 relInIndex += bufferSize_;
3698 }
3699
3700 // "out" index can begin at and end on the "in" index
3701 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3702 return false; // not enough space between "out" index and "in" index
3703 }
3704
3705 // copy buffer from internal to external
3706 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3707 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3708 int fromOutSize = bufferSize - fromZeroSize;
3709
3710 switch( format )
3711 {
3712 case RTAUDIO_SINT8:
3713 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3714 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3715 break;
3716 case RTAUDIO_SINT16:
3717 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3718 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3719 break;
3720 case RTAUDIO_SINT24:
3721 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3722 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3723 break;
3724 case RTAUDIO_SINT32:
3725 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3726 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3727 break;
3728 case RTAUDIO_FLOAT32:
3729 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3730 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3731 break;
3732 case RTAUDIO_FLOAT64:
3733 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3734 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3735 break;
3736 }
3737
3738 // update "out" index
3739 outIndex_ += bufferSize;
3740 outIndex_ %= bufferSize_;
3741
3742 return true;
3743 }
3744
3745 private:
3746 char* buffer_;
3747 unsigned int bufferSize_;
3748 unsigned int inIndex_;
3749 unsigned int outIndex_;
3750 };
3751
3752 //-----------------------------------------------------------------------------
3753
3754 // A structure to hold various information related to the WASAPI implementation.
3755 struct WasapiHandle
3756 {
3757 IAudioClient* captureAudioClient;
3758 IAudioClient* renderAudioClient;
3759 IAudioCaptureClient* captureClient;
3760 IAudioRenderClient* renderClient;
3761 HANDLE captureEvent;
3762 HANDLE renderEvent;
3763
WasapiHandleWasapiHandle3764 WasapiHandle()
3765 : captureAudioClient( NULL ),
3766 renderAudioClient( NULL ),
3767 captureClient( NULL ),
3768 renderClient( NULL ),
3769 captureEvent( NULL ),
3770 renderEvent( NULL ) {}
3771 };
3772
3773 //=============================================================================
3774
RtApiWasapi()3775 RtApiWasapi::RtApiWasapi()
3776 : coInitialized_( false ), deviceEnumerator_( NULL )
3777 {
3778 // WASAPI can run either apartment or multi-threaded
3779 HRESULT hr = CoInitialize( NULL );
3780 if ( !FAILED( hr ) )
3781 coInitialized_ = true;
3782
3783 // Instantiate device enumerator
3784 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3785 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3786 ( void** ) &deviceEnumerator_ );
3787
3788 if ( FAILED( hr ) ) {
3789 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3790 error( RtAudioError::DRIVER_ERROR );
3791 }
3792 }
3793
3794 //-----------------------------------------------------------------------------
3795
~RtApiWasapi()3796 RtApiWasapi::~RtApiWasapi()
3797 {
3798 if ( stream_.state != STREAM_CLOSED )
3799 closeStream();
3800
3801 SAFE_RELEASE( deviceEnumerator_ );
3802
3803 // If this object previously called CoInitialize()
3804 if ( coInitialized_ )
3805 CoUninitialize();
3806 }
3807
3808 //=============================================================================
3809
getDeviceCount(void)3810 unsigned int RtApiWasapi::getDeviceCount( void )
3811 {
3812 unsigned int captureDeviceCount = 0;
3813 unsigned int renderDeviceCount = 0;
3814
3815 IMMDeviceCollection* captureDevices = NULL;
3816 IMMDeviceCollection* renderDevices = NULL;
3817
3818 // Count capture devices
3819 errorText_.clear();
3820 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3821 if ( FAILED( hr ) ) {
3822 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3823 goto Exit;
3824 }
3825
3826 hr = captureDevices->GetCount( &captureDeviceCount );
3827 if ( FAILED( hr ) ) {
3828 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3829 goto Exit;
3830 }
3831
3832 // Count render devices
3833 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3834 if ( FAILED( hr ) ) {
3835 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3836 goto Exit;
3837 }
3838
3839 hr = renderDevices->GetCount( &renderDeviceCount );
3840 if ( FAILED( hr ) ) {
3841 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3842 goto Exit;
3843 }
3844
3845 Exit:
3846 // release all references
3847 SAFE_RELEASE( captureDevices );
3848 SAFE_RELEASE( renderDevices );
3849
3850 if ( errorText_.empty() )
3851 return captureDeviceCount + renderDeviceCount;
3852
3853 error( RtAudioError::DRIVER_ERROR );
3854 return 0;
3855 }
3856
3857 //-----------------------------------------------------------------------------
3858
getDeviceInfo(unsigned int device)3859 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3860 {
3861 RtAudio::DeviceInfo info;
3862 unsigned int captureDeviceCount = 0;
3863 unsigned int renderDeviceCount = 0;
3864 std::string defaultDeviceName;
3865 bool isCaptureDevice = false;
3866
3867 PROPVARIANT deviceNameProp;
3868 PROPVARIANT defaultDeviceNameProp;
3869
3870 IMMDeviceCollection* captureDevices = NULL;
3871 IMMDeviceCollection* renderDevices = NULL;
3872 IMMDevice* devicePtr = NULL;
3873 IMMDevice* defaultDevicePtr = NULL;
3874 IAudioClient* audioClient = NULL;
3875 IPropertyStore* devicePropStore = NULL;
3876 IPropertyStore* defaultDevicePropStore = NULL;
3877
3878 WAVEFORMATEX* deviceFormat = NULL;
3879 WAVEFORMATEX* closestMatchFormat = NULL;
3880
3881 // probed
3882 info.probed = false;
3883
3884 // Count capture devices
3885 errorText_.clear();
3886 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
3887 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3888 if ( FAILED( hr ) ) {
3889 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
3890 goto Exit;
3891 }
3892
3893 hr = captureDevices->GetCount( &captureDeviceCount );
3894 if ( FAILED( hr ) ) {
3895 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
3896 goto Exit;
3897 }
3898
3899 // Count render devices
3900 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3901 if ( FAILED( hr ) ) {
3902 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
3903 goto Exit;
3904 }
3905
3906 hr = renderDevices->GetCount( &renderDeviceCount );
3907 if ( FAILED( hr ) ) {
3908 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
3909 goto Exit;
3910 }
3911
3912 // validate device index
3913 if ( device >= captureDeviceCount + renderDeviceCount ) {
3914 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
3915 errorType = RtAudioError::INVALID_USE;
3916 goto Exit;
3917 }
3918
3919 // determine whether index falls within capture or render devices
3920 if ( device >= renderDeviceCount ) {
3921 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
3922 if ( FAILED( hr ) ) {
3923 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
3924 goto Exit;
3925 }
3926 isCaptureDevice = true;
3927 }
3928 else {
3929 hr = renderDevices->Item( device, &devicePtr );
3930 if ( FAILED( hr ) ) {
3931 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
3932 goto Exit;
3933 }
3934 isCaptureDevice = false;
3935 }
3936
3937 // get default device name
3938 if ( isCaptureDevice ) {
3939 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
3940 if ( FAILED( hr ) ) {
3941 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
3942 goto Exit;
3943 }
3944 }
3945 else {
3946 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
3947 if ( FAILED( hr ) ) {
3948 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
3949 goto Exit;
3950 }
3951 }
3952
3953 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
3954 if ( FAILED( hr ) ) {
3955 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
3956 goto Exit;
3957 }
3958 PropVariantInit( &defaultDeviceNameProp );
3959
3960 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
3961 if ( FAILED( hr ) ) {
3962 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
3963 goto Exit;
3964 }
3965
3966 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
3967
3968 // name
3969 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
3970 if ( FAILED( hr ) ) {
3971 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
3972 goto Exit;
3973 }
3974
3975 PropVariantInit( &deviceNameProp );
3976
3977 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
3978 if ( FAILED( hr ) ) {
3979 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
3980 goto Exit;
3981 }
3982
3983 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
3984
3985 // is default
3986 if ( isCaptureDevice ) {
3987 info.isDefaultInput = info.name == defaultDeviceName;
3988 info.isDefaultOutput = false;
3989 }
3990 else {
3991 info.isDefaultInput = false;
3992 info.isDefaultOutput = info.name == defaultDeviceName;
3993 }
3994
3995 // channel count
3996 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
3997 if ( FAILED( hr ) ) {
3998 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
3999 goto Exit;
4000 }
4001
4002 hr = audioClient->GetMixFormat( &deviceFormat );
4003 if ( FAILED( hr ) ) {
4004 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4005 goto Exit;
4006 }
4007
4008 if ( isCaptureDevice ) {
4009 info.inputChannels = deviceFormat->nChannels;
4010 info.outputChannels = 0;
4011 info.duplexChannels = 0;
4012 }
4013 else {
4014 info.inputChannels = 0;
4015 info.outputChannels = deviceFormat->nChannels;
4016 info.duplexChannels = 0;
4017 }
4018
4019 // sample rates (WASAPI only supports the one native sample rate)
4020 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4021
4022 info.sampleRates.clear();
4023 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
4024
4025 // native format
4026 info.nativeFormats = 0;
4027
4028 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4029 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4030 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4031 {
4032 if ( deviceFormat->wBitsPerSample == 32 ) {
4033 info.nativeFormats |= RTAUDIO_FLOAT32;
4034 }
4035 else if ( deviceFormat->wBitsPerSample == 64 ) {
4036 info.nativeFormats |= RTAUDIO_FLOAT64;
4037 }
4038 }
4039 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4040 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4041 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4042 {
4043 if ( deviceFormat->wBitsPerSample == 8 ) {
4044 info.nativeFormats |= RTAUDIO_SINT8;
4045 }
4046 else if ( deviceFormat->wBitsPerSample == 16 ) {
4047 info.nativeFormats |= RTAUDIO_SINT16;
4048 }
4049 else if ( deviceFormat->wBitsPerSample == 24 ) {
4050 info.nativeFormats |= RTAUDIO_SINT24;
4051 }
4052 else if ( deviceFormat->wBitsPerSample == 32 ) {
4053 info.nativeFormats |= RTAUDIO_SINT32;
4054 }
4055 }
4056
4057 // probed
4058 info.probed = true;
4059
4060 Exit:
4061 // release all references
4062 PropVariantClear( &deviceNameProp );
4063 PropVariantClear( &defaultDeviceNameProp );
4064
4065 SAFE_RELEASE( captureDevices );
4066 SAFE_RELEASE( renderDevices );
4067 SAFE_RELEASE( devicePtr );
4068 SAFE_RELEASE( defaultDevicePtr );
4069 SAFE_RELEASE( audioClient );
4070 SAFE_RELEASE( devicePropStore );
4071 SAFE_RELEASE( defaultDevicePropStore );
4072
4073 CoTaskMemFree( deviceFormat );
4074 CoTaskMemFree( closestMatchFormat );
4075
4076 if ( !errorText_.empty() )
4077 error( errorType );
4078 return info;
4079 }
4080
4081 //-----------------------------------------------------------------------------
4082
getDefaultOutputDevice(void)4083 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4084 {
4085 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4086 if ( getDeviceInfo( i ).isDefaultOutput ) {
4087 return i;
4088 }
4089 }
4090
4091 return 0;
4092 }
4093
4094 //-----------------------------------------------------------------------------
4095
getDefaultInputDevice(void)4096 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4097 {
4098 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4099 if ( getDeviceInfo( i ).isDefaultInput ) {
4100 return i;
4101 }
4102 }
4103
4104 return 0;
4105 }
4106
4107 //-----------------------------------------------------------------------------
4108
closeStream(void)4109 void RtApiWasapi::closeStream( void )
4110 {
4111 if ( stream_.state == STREAM_CLOSED ) {
4112 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4113 error( RtAudioError::WARNING );
4114 return;
4115 }
4116
4117 if ( stream_.state != STREAM_STOPPED )
4118 stopStream();
4119
4120 // clean up stream memory
4121 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4122 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4123
4124 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4125 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4126
4127 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4128 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4129
4130 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4131 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4132
4133 delete ( WasapiHandle* ) stream_.apiHandle;
4134 stream_.apiHandle = NULL;
4135
4136 for ( int i = 0; i < 2; i++ ) {
4137 if ( stream_.userBuffer[i] ) {
4138 free( stream_.userBuffer[i] );
4139 stream_.userBuffer[i] = 0;
4140 }
4141 }
4142
4143 if ( stream_.deviceBuffer ) {
4144 free( stream_.deviceBuffer );
4145 stream_.deviceBuffer = 0;
4146 }
4147
4148 // update stream state
4149 stream_.state = STREAM_CLOSED;
4150 }
4151
4152 //-----------------------------------------------------------------------------
4153
startStream(void)4154 void RtApiWasapi::startStream( void )
4155 {
4156 verifyStream();
4157
4158 if ( stream_.state == STREAM_RUNNING ) {
4159 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4160 error( RtAudioError::WARNING );
4161 return;
4162 }
4163
4164 // update stream state
4165 stream_.state = STREAM_RUNNING;
4166
4167 // create WASAPI stream thread
4168 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4169
4170 if ( !stream_.callbackInfo.thread ) {
4171 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4172 error( RtAudioError::THREAD_ERROR );
4173 }
4174 else {
4175 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4176 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4177 }
4178 }
4179
4180 //-----------------------------------------------------------------------------
4181
// Stops the WASAPI stream gracefully: signals the stream thread to finish,
// waits for it to exit, lets the last buffer play out, then stops the
// capture/render audio clients and closes the thread handle.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): busy-wait; wasapiThread sets state to STREAM_STOPPED on
  // exit. If the thread never exits, this spins forever -- confirm intended.
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      // NOTE(review): early return here leaves the callback thread handle
      // open (closed below) -- minor handle leak on this error path.
      errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4232
4233 //-----------------------------------------------------------------------------
4234
// Aborts the WASAPI stream: same shutdown sequence as stopStream() except
// that it does not wait for the last buffer to play out.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): busy-wait until wasapiThread sets STREAM_STOPPED on exit.
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4282
4283 //-----------------------------------------------------------------------------
4284
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4285 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4286 unsigned int firstChannel, unsigned int sampleRate,
4287 RtAudioFormat format, unsigned int* bufferSize,
4288 RtAudio::StreamOptions* options )
4289 {
4290 bool methodResult = FAILURE;
4291 unsigned int captureDeviceCount = 0;
4292 unsigned int renderDeviceCount = 0;
4293
4294 IMMDeviceCollection* captureDevices = NULL;
4295 IMMDeviceCollection* renderDevices = NULL;
4296 IMMDevice* devicePtr = NULL;
4297 WAVEFORMATEX* deviceFormat = NULL;
4298 unsigned int bufferBytes;
4299 stream_.state = STREAM_STOPPED;
4300 RtAudio::DeviceInfo deviceInfo;
4301
4302 // create API Handle if not already created
4303 if ( !stream_.apiHandle )
4304 stream_.apiHandle = ( void* ) new WasapiHandle();
4305
4306 // Count capture devices
4307 errorText_.clear();
4308 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4309 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4310 if ( FAILED( hr ) ) {
4311 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4312 goto Exit;
4313 }
4314
4315 hr = captureDevices->GetCount( &captureDeviceCount );
4316 if ( FAILED( hr ) ) {
4317 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4318 goto Exit;
4319 }
4320
4321 // Count render devices
4322 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4323 if ( FAILED( hr ) ) {
4324 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4325 goto Exit;
4326 }
4327
4328 hr = renderDevices->GetCount( &renderDeviceCount );
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4331 goto Exit;
4332 }
4333
4334 // validate device index
4335 if ( device >= captureDeviceCount + renderDeviceCount ) {
4336 errorType = RtAudioError::INVALID_USE;
4337 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4338 goto Exit;
4339 }
4340
4341 deviceInfo = getDeviceInfo( device );
4342
4343 // validate sample rate
4344 if ( sampleRate != deviceInfo.preferredSampleRate )
4345 {
4346 errorType = RtAudioError::INVALID_USE;
4347 std::stringstream ss;
4348 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4349 << "Hz sample rate not supported. This device only supports "
4350 << deviceInfo.preferredSampleRate << "Hz.";
4351 errorText_ = ss.str();
4352 goto Exit;
4353 }
4354
4355 // determine whether index falls within capture or render devices
4356 if ( device >= renderDeviceCount ) {
4357 if ( mode != INPUT ) {
4358 errorType = RtAudioError::INVALID_USE;
4359 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4360 goto Exit;
4361 }
4362
4363 // retrieve captureAudioClient from devicePtr
4364 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4365
4366 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4367 if ( FAILED( hr ) ) {
4368 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4369 goto Exit;
4370 }
4371
4372 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4373 NULL, ( void** ) &captureAudioClient );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4376 goto Exit;
4377 }
4378
4379 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4382 goto Exit;
4383 }
4384
4385 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4386 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4387 }
4388 else {
4389 if ( mode != OUTPUT ) {
4390 errorType = RtAudioError::INVALID_USE;
4391 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4392 goto Exit;
4393 }
4394
4395 // retrieve renderAudioClient from devicePtr
4396 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4397
4398 hr = renderDevices->Item( device, &devicePtr );
4399 if ( FAILED( hr ) ) {
4400 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4401 goto Exit;
4402 }
4403
4404 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4405 NULL, ( void** ) &renderAudioClient );
4406 if ( FAILED( hr ) ) {
4407 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4408 goto Exit;
4409 }
4410
4411 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4412 if ( FAILED( hr ) ) {
4413 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4414 goto Exit;
4415 }
4416
4417 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4418 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4419 }
4420
4421 // fill stream data
4422 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4423 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4424 stream_.mode = DUPLEX;
4425 }
4426 else {
4427 stream_.mode = mode;
4428 }
4429
4430 stream_.device[mode] = device;
4431 stream_.doByteSwap[mode] = false;
4432 stream_.sampleRate = sampleRate;
4433 stream_.bufferSize = *bufferSize;
4434 stream_.nBuffers = 1;
4435 stream_.nUserChannels[mode] = channels;
4436 stream_.channelOffset[mode] = firstChannel;
4437 stream_.userFormat = format;
4438 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4439
4440 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4441 stream_.userInterleaved = false;
4442 else
4443 stream_.userInterleaved = true;
4444 stream_.deviceInterleaved[mode] = true;
4445
4446 // Set flags for buffer conversion.
4447 stream_.doConvertBuffer[mode] = false;
4448 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4449 stream_.nUserChannels != stream_.nDeviceChannels )
4450 stream_.doConvertBuffer[mode] = true;
4451 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4452 stream_.nUserChannels[mode] > 1 )
4453 stream_.doConvertBuffer[mode] = true;
4454
4455 if ( stream_.doConvertBuffer[mode] )
4456 setConvertInfo( mode, 0 );
4457
4458 // Allocate necessary internal buffers
4459 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4460
4461 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4462 if ( !stream_.userBuffer[mode] ) {
4463 errorType = RtAudioError::MEMORY_ERROR;
4464 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4465 goto Exit;
4466 }
4467
4468 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4469 stream_.callbackInfo.priority = 15;
4470 else
4471 stream_.callbackInfo.priority = 0;
4472
4473 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4474 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4475
4476 methodResult = SUCCESS;
4477
4478 Exit:
4479 //clean up
4480 SAFE_RELEASE( captureDevices );
4481 SAFE_RELEASE( renderDevices );
4482 SAFE_RELEASE( devicePtr );
4483 CoTaskMemFree( deviceFormat );
4484
4485 // if method failed, close the stream
4486 if ( methodResult == FAILURE )
4487 closeStream();
4488
4489 if ( !errorText_.empty() )
4490 error( errorType );
4491 return methodResult;
4492 }
4493
4494 //=============================================================================
4495
runWasapiThread(void * wasapiPtr)4496 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4497 {
4498 if ( wasapiPtr )
4499 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4500
4501 return 0;
4502 }
4503
stopWasapiThread(void * wasapiPtr)4504 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4505 {
4506 if ( wasapiPtr )
4507 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4508
4509 return 0;
4510 }
4511
abortWasapiThread(void * wasapiPtr)4512 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4513 {
4514 if ( wasapiPtr )
4515 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4516
4517 return 0;
4518 }
4519
4520 //-----------------------------------------------------------------------------
4521
wasapiThread()4522 void RtApiWasapi::wasapiThread()
4523 {
4524 // as this is a new thread, we must CoInitialize it
4525 CoInitialize( NULL );
4526
4527 HRESULT hr;
4528
4529 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4530 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4531 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4532 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4533 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4534 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4535
4536 WAVEFORMATEX* captureFormat = NULL;
4537 WAVEFORMATEX* renderFormat = NULL;
4538 WasapiBuffer captureBuffer;
4539 WasapiBuffer renderBuffer;
4540
4541 // declare local stream variables
4542 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4543 BYTE* streamBuffer = NULL;
4544 unsigned long captureFlags = 0;
4545 unsigned int bufferFrameCount = 0;
4546 unsigned int numFramesPadding = 0;
4547 bool callbackPushed = false;
4548 bool callbackPulled = false;
4549 bool callbackStopped = false;
4550 int callbackResult = 0;
4551
4552 unsigned int deviceBuffSize = 0;
4553
4554 errorText_.clear();
4555 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4556
4557 // Attempt to assign "Pro Audio" characteristic to thread
4558 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4559 if ( AvrtDll ) {
4560 DWORD taskIndex = 0;
4561 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4562 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4563 FreeLibrary( AvrtDll );
4564 }
4565
4566 // start capture stream if applicable
4567 if ( captureAudioClient ) {
4568 hr = captureAudioClient->GetMixFormat( &captureFormat );
4569 if ( FAILED( hr ) ) {
4570 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4571 goto Exit;
4572 }
4573
4574 // initialize capture stream according to desire buffer size
4575 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4576
4577 if ( !captureClient ) {
4578 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4579 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4580 desiredBufferPeriod,
4581 desiredBufferPeriod,
4582 captureFormat,
4583 NULL );
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4586 goto Exit;
4587 }
4588
4589 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4590 ( void** ) &captureClient );
4591 if ( FAILED( hr ) ) {
4592 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4593 goto Exit;
4594 }
4595
4596 // configure captureEvent to trigger on every available capture buffer
4597 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4598 if ( !captureEvent ) {
4599 errorType = RtAudioError::SYSTEM_ERROR;
4600 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4601 goto Exit;
4602 }
4603
4604 hr = captureAudioClient->SetEventHandle( captureEvent );
4605 if ( FAILED( hr ) ) {
4606 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4607 goto Exit;
4608 }
4609
4610 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4611 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4612 }
4613
4614 unsigned int inBufferSize = 0;
4615 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4616 if ( FAILED( hr ) ) {
4617 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4618 goto Exit;
4619 }
4620
4621 // scale outBufferSize according to stream->user sample rate ratio
4622 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4623 inBufferSize *= stream_.nDeviceChannels[INPUT];
4624
4625 // set captureBuffer size
4626 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4627
4628 // reset the capture stream
4629 hr = captureAudioClient->Reset();
4630 if ( FAILED( hr ) ) {
4631 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4632 goto Exit;
4633 }
4634
4635 // start the capture stream
4636 hr = captureAudioClient->Start();
4637 if ( FAILED( hr ) ) {
4638 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4639 goto Exit;
4640 }
4641 }
4642
4643 // start render stream if applicable
4644 if ( renderAudioClient ) {
4645 hr = renderAudioClient->GetMixFormat( &renderFormat );
4646 if ( FAILED( hr ) ) {
4647 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4648 goto Exit;
4649 }
4650
4651 // initialize render stream according to desire buffer size
4652 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4653
4654 if ( !renderClient ) {
4655 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4656 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4657 desiredBufferPeriod,
4658 desiredBufferPeriod,
4659 renderFormat,
4660 NULL );
4661 if ( FAILED( hr ) ) {
4662 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4663 goto Exit;
4664 }
4665
4666 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4667 ( void** ) &renderClient );
4668 if ( FAILED( hr ) ) {
4669 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4670 goto Exit;
4671 }
4672
4673 // configure renderEvent to trigger on every available render buffer
4674 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4675 if ( !renderEvent ) {
4676 errorType = RtAudioError::SYSTEM_ERROR;
4677 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4678 goto Exit;
4679 }
4680
4681 hr = renderAudioClient->SetEventHandle( renderEvent );
4682 if ( FAILED( hr ) ) {
4683 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4684 goto Exit;
4685 }
4686
4687 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4688 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4689 }
4690
4691 unsigned int outBufferSize = 0;
4692 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4693 if ( FAILED( hr ) ) {
4694 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4695 goto Exit;
4696 }
4697
4698 // scale inBufferSize according to user->stream sample rate ratio
4699 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4700 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4701
4702 // set renderBuffer size
4703 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4704
4705 // reset the render stream
4706 hr = renderAudioClient->Reset();
4707 if ( FAILED( hr ) ) {
4708 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4709 goto Exit;
4710 }
4711
4712 // start the render stream
4713 hr = renderAudioClient->Start();
4714 if ( FAILED( hr ) ) {
4715 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4716 goto Exit;
4717 }
4718 }
4719
4720 if ( stream_.mode == INPUT ) {
4721 using namespace std; // for roundf
4722 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4723 }
4724 else if ( stream_.mode == OUTPUT ) {
4725 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4726 }
4727 else if ( stream_.mode == DUPLEX ) {
4728 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4729 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4730 }
4731
4732 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4733 if ( !stream_.deviceBuffer ) {
4734 errorType = RtAudioError::MEMORY_ERROR;
4735 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4736 goto Exit;
4737 }
4738
4739 // stream process loop
4740 while ( stream_.state != STREAM_STOPPING ) {
4741 if ( !callbackPulled ) {
4742 // Callback Input
4743 // ==============
4744 // 1. Pull callback buffer from inputBuffer
4745 // 2. If 1. was successful: Convert callback buffer to user format
4746
4747 if ( captureAudioClient ) {
4748 // Pull callback buffer from inputBuffer
4749 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4750 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4751 stream_.deviceFormat[INPUT] );
4752
4753 if ( callbackPulled ) {
4754 if ( stream_.doConvertBuffer[INPUT] ) {
4755 // Convert callback buffer to user format
4756 convertBuffer( stream_.userBuffer[INPUT],
4757 stream_.deviceBuffer,
4758 stream_.convertInfo[INPUT] );
4759 }
4760 else {
4761 // no further conversion, simple copy deviceBuffer to userBuffer
4762 memcpy( stream_.userBuffer[INPUT],
4763 stream_.deviceBuffer,
4764 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4765 }
4766 }
4767 }
4768 else {
4769 // if there is no capture stream, set callbackPulled flag
4770 callbackPulled = true;
4771 }
4772
4773 // Execute Callback
4774 // ================
4775 // 1. Execute user callback method
4776 // 2. Handle return value from callback
4777
4778 // if callback has not requested the stream to stop
4779 if ( callbackPulled && !callbackStopped ) {
4780 // Execute user callback method
4781 callbackResult = callback( stream_.userBuffer[OUTPUT],
4782 stream_.userBuffer[INPUT],
4783 stream_.bufferSize,
4784 getStreamTime(),
4785 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4786 stream_.callbackInfo.userData );
4787
4788 // Handle return value from callback
4789 if ( callbackResult == 1 ) {
4790 // instantiate a thread to stop this thread
4791 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4792 if ( !threadHandle ) {
4793 errorType = RtAudioError::THREAD_ERROR;
4794 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4795 goto Exit;
4796 }
4797 else if ( !CloseHandle( threadHandle ) ) {
4798 errorType = RtAudioError::THREAD_ERROR;
4799 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4800 goto Exit;
4801 }
4802
4803 callbackStopped = true;
4804 }
4805 else if ( callbackResult == 2 ) {
4806 // instantiate a thread to stop this thread
4807 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4808 if ( !threadHandle ) {
4809 errorType = RtAudioError::THREAD_ERROR;
4810 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4811 goto Exit;
4812 }
4813 else if ( !CloseHandle( threadHandle ) ) {
4814 errorType = RtAudioError::THREAD_ERROR;
4815 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4816 goto Exit;
4817 }
4818
4819 callbackStopped = true;
4820 }
4821 }
4822 }
4823
4824 // Callback Output
4825 // ===============
4826 // 1. Convert callback buffer to stream format
4827 // 2. Push callback buffer into outputBuffer
4828
4829 if ( renderAudioClient && callbackPulled ) {
4830 if ( stream_.doConvertBuffer[OUTPUT] ) {
4831 // Convert callback buffer to stream format
4832 convertBuffer( stream_.deviceBuffer,
4833 stream_.userBuffer[OUTPUT],
4834 stream_.convertInfo[OUTPUT] );
4835
4836 }
4837
4838 // Push callback buffer into outputBuffer
4839 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
4840 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
4841 stream_.deviceFormat[OUTPUT] );
4842 }
4843 else {
4844 // if there is no render stream, set callbackPushed flag
4845 callbackPushed = true;
4846 }
4847
4848 // Stream Capture
4849 // ==============
4850 // 1. Get capture buffer from stream
4851 // 2. Push capture buffer into inputBuffer
4852 // 3. If 2. was successful: Release capture buffer
4853
4854 if ( captureAudioClient ) {
4855 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4856 if ( !callbackPulled ) {
4857 WaitForSingleObject( captureEvent, INFINITE );
4858 }
4859
4860 // Get capture buffer from stream
4861 hr = captureClient->GetBuffer( &streamBuffer,
4862 &bufferFrameCount,
4863 &captureFlags, NULL, NULL );
4864 if ( FAILED( hr ) ) {
4865 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4866 goto Exit;
4867 }
4868
4869 if ( bufferFrameCount != 0 ) {
4870 // Push capture buffer into inputBuffer
4871 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4872 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4873 stream_.deviceFormat[INPUT] ) )
4874 {
4875 // Release capture buffer
4876 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4877 if ( FAILED( hr ) ) {
4878 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4879 goto Exit;
4880 }
4881 }
4882 else
4883 {
4884 // Inform WASAPI that capture was unsuccessful
4885 hr = captureClient->ReleaseBuffer( 0 );
4886 if ( FAILED( hr ) ) {
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4888 goto Exit;
4889 }
4890 }
4891 }
4892 else
4893 {
4894 // Inform WASAPI that capture was unsuccessful
4895 hr = captureClient->ReleaseBuffer( 0 );
4896 if ( FAILED( hr ) ) {
4897 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4898 goto Exit;
4899 }
4900 }
4901 }
4902
4903 // Stream Render
4904 // =============
4905 // 1. Get render buffer from stream
4906 // 2. Pull next buffer from outputBuffer
4907 // 3. If 2. was successful: Fill render buffer with next buffer
4908 // Release render buffer
4909
4910 if ( renderAudioClient ) {
4911 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
4912 if ( callbackPulled && !callbackPushed ) {
4913 WaitForSingleObject( renderEvent, INFINITE );
4914 }
4915
4916 // Get render buffer from stream
4917 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
4918 if ( FAILED( hr ) ) {
4919 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
4920 goto Exit;
4921 }
4922
4923 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
4924 if ( FAILED( hr ) ) {
4925 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
4926 goto Exit;
4927 }
4928
4929 bufferFrameCount -= numFramesPadding;
4930
4931 if ( bufferFrameCount != 0 ) {
4932 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
4933 if ( FAILED( hr ) ) {
4934 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
4935 goto Exit;
4936 }
4937
4938 // Pull next buffer from outputBuffer
4939 // Fill render buffer with next buffer
4940 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
4941 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
4942 stream_.deviceFormat[OUTPUT] ) )
4943 {
4944 // Release render buffer
4945 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
4946 if ( FAILED( hr ) ) {
4947 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
4948 goto Exit;
4949 }
4950 }
4951 else
4952 {
4953 // Inform WASAPI that render was unsuccessful
4954 hr = renderClient->ReleaseBuffer( 0, 0 );
4955 if ( FAILED( hr ) ) {
4956 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
4957 goto Exit;
4958 }
4959 }
4960 }
4961 else
4962 {
4963 // Inform WASAPI that render was unsuccessful
4964 hr = renderClient->ReleaseBuffer( 0, 0 );
4965 if ( FAILED( hr ) ) {
4966 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
4967 goto Exit;
4968 }
4969 }
4970 }
4971
4972 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
4973 if ( callbackPushed ) {
4974 callbackPulled = false;
4975 // tick stream time
4976 RtApi::tickStreamTime();
4977 }
4978
4979 }
4980
4981 Exit:
4982 // clean up
4983 CoTaskMemFree( captureFormat );
4984 CoTaskMemFree( renderFormat );
4985
4986 CoUninitialize();
4987
4988 // update stream state
4989 stream_.state = STREAM_STOPPED;
4990
4991 if ( errorText_.empty() )
4992 return;
4993 else
4994 error( errorType );
4995 }
4996
4997 //******************** End of __WINDOWS_WASAPI__ *********************//
4998 #endif
4999
5000
5001 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5002
5003 // Modified by Robin Davies, October 2005
5004 // - Improvements to DirectX pointer chasing.
5005 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5006 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5007 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5008 // Changed device query structure for RtAudio 4.0.7, January 2010
5009
5010 #include <mmsystem.h>
5011 #include <mmreg.h>
5012 #include <dsound.h>
5013 #include <assert.h>
5014 #include <algorithm>
5015
5016 #if defined(__MINGW32__)
5017 // missing from latest mingw winapi
5018 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5019 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5020 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5021 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5022 #endif
5023
5024 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5025
5026 #ifdef _MSC_VER // if Microsoft Visual C++
5027 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5028 #endif
5029
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5030 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5031 {
5032 if ( pointer > bufferSize ) pointer -= bufferSize;
5033 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5034 if ( pointer < earlierPointer ) pointer += bufferSize;
5035 return pointer >= earlierPointer && pointer < laterPointer;
5036 }
5037
5038 // A structure to hold various information related to the DirectSound
5039 // API implementation.
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  void *id[2]; // per-direction DirectSound object pointers -- presumably [0]=output, [1]=input; confirm against open/close code
  void *buffer[2]; // per-direction DirectSound buffer pointers -- same index convention
  bool xrun[2]; // per-direction over/underrun flags
  UINT bufferPointer[2]; // per-direction current position within the DS buffer
  DWORD dsBufferSize[2]; // per-direction DS buffer size in bytes (NOTE: not set by the constructor)
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition; // event handle used for signaling (NOTE: not initialized by the constructor)

  // Zero-initializes counters, pointers and flags; dsBufferSize,
  // dsPointerLeadTime and condition are left for the stream-open code to set.
  DsHandle()
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
};
5054
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.

// Enumeration callback passed to DirectSound(Capture)Enumerate; fills the
// DsProbeData handed in via lpContext (definition appears later in this file).
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR module,
                                          LPVOID lpContext );

// Maps a DirectSound error code to a human-readable string.
static const char* getErrorString( int code );

// Thread entry point servicing the DirectSound stream (definition below).
static unsigned __stdcall callbackHandler( void *ptr );
5065
// Cached description of one enumerated DirectSound device.
struct DsDevice {
  LPGUID id[2]; // per-direction device GUIDs -- presumably [0]=output, [1]=input; confirm against deviceQueryCallback
  bool validId[2]; // whether the corresponding id[] entry has been filled in
  bool found; // set during enumeration; getDeviceCount() clears it first and erases entries still false afterwards
  std::string name;

  DsDevice()
  : found(false) { validId[0] = false; validId[1] = false; }
};
5075
// Context passed to deviceQueryCallback during device enumeration.
struct DsProbeData {
  bool isInput; // true while enumerating capture devices, false for output (see getDeviceCount)
  std::vector<struct DsDevice>* dsDevices; // the device list being built/updated
};
5080
RtApiDs()5081 RtApiDs :: RtApiDs()
5082 {
5083 // Dsound will run both-threaded. If CoInitialize fails, then just
5084 // accept whatever the mainline chose for a threading model.
5085 coInitialized_ = false;
5086 HRESULT hr = CoInitialize( NULL );
5087 if ( !FAILED( hr ) ) coInitialized_ = true;
5088 }
5089
RtApiDs :: ~RtApiDs()
{
  // Close any open stream first, then undo the constructor's CoInitialize.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize(); // balanced call.
}
5095
5096 // The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  // DirectSound enumerates the default output device first, so it is
  // always at index 0.
  return 0;
}
5101
5102 // The DirectSound default input is always the first input device,
5103 // which is the first capture device enumerated.
getDefaultInputDevice(void)5104 unsigned int RtApiDs :: getDefaultInputDevice( void )
5105 {
5106 return 0;
5107 }
5108
getDeviceCount(void)5109 unsigned int RtApiDs :: getDeviceCount( void )
5110 {
5111 // Set query flag for previously found devices to false, so that we
5112 // can check for any devices that have disappeared.
5113 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5114 dsDevices[i].found = false;
5115
5116 // Query DirectSound devices.
5117 struct DsProbeData probeInfo;
5118 probeInfo.isInput = false;
5119 probeInfo.dsDevices = &dsDevices;
5120 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5121 if ( FAILED( result ) ) {
5122 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5123 errorText_ = errorStream_.str();
5124 error( RtAudioError::WARNING );
5125 }
5126
5127 // Query DirectSoundCapture devices.
5128 probeInfo.isInput = true;
5129 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5130 if ( FAILED( result ) ) {
5131 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5132 errorText_ = errorStream_.str();
5133 error( RtAudioError::WARNING );
5134 }
5135
5136 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5137 for ( unsigned int i=0; i<dsDevices.size(); ) {
5138 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5139 else i++;
5140 }
5141
5142 return static_cast<unsigned int>(dsDevices.size());
5143 }
5144
getDeviceInfo(unsigned int device)5145 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5146 {
5147 RtAudio::DeviceInfo info;
5148 info.probed = false;
5149
5150 if ( dsDevices.size() == 0 ) {
5151 // Force a query of all devices
5152 getDeviceCount();
5153 if ( dsDevices.size() == 0 ) {
5154 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5155 error( RtAudioError::INVALID_USE );
5156 return info;
5157 }
5158 }
5159
5160 if ( device >= dsDevices.size() ) {
5161 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5162 error( RtAudioError::INVALID_USE );
5163 return info;
5164 }
5165
5166 HRESULT result;
5167 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5168
5169 LPDIRECTSOUND output;
5170 DSCAPS outCaps;
5171 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5172 if ( FAILED( result ) ) {
5173 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5174 errorText_ = errorStream_.str();
5175 error( RtAudioError::WARNING );
5176 goto probeInput;
5177 }
5178
5179 outCaps.dwSize = sizeof( outCaps );
5180 result = output->GetCaps( &outCaps );
5181 if ( FAILED( result ) ) {
5182 output->Release();
5183 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5184 errorText_ = errorStream_.str();
5185 error( RtAudioError::WARNING );
5186 goto probeInput;
5187 }
5188
5189 // Get output channel information.
5190 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5191
5192 // Get sample rate information.
5193 info.sampleRates.clear();
5194 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5195 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5196 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5197 info.sampleRates.push_back( SAMPLE_RATES[k] );
5198
5199 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5200 info.preferredSampleRate = SAMPLE_RATES[k];
5201 }
5202 }
5203
5204 // Get format information.
5205 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5206 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5207
5208 output->Release();
5209
5210 if ( getDefaultOutputDevice() == device )
5211 info.isDefaultOutput = true;
5212
5213 if ( dsDevices[ device ].validId[1] == false ) {
5214 info.name = dsDevices[ device ].name;
5215 info.probed = true;
5216 return info;
5217 }
5218
5219 probeInput:
5220
5221 LPDIRECTSOUNDCAPTURE input;
5222 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5223 if ( FAILED( result ) ) {
5224 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5225 errorText_ = errorStream_.str();
5226 error( RtAudioError::WARNING );
5227 return info;
5228 }
5229
5230 DSCCAPS inCaps;
5231 inCaps.dwSize = sizeof( inCaps );
5232 result = input->GetCaps( &inCaps );
5233 if ( FAILED( result ) ) {
5234 input->Release();
5235 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5236 errorText_ = errorStream_.str();
5237 error( RtAudioError::WARNING );
5238 return info;
5239 }
5240
5241 // Get input channel information.
5242 info.inputChannels = inCaps.dwChannels;
5243
5244 // Get sample rate and format information.
5245 std::vector<unsigned int> rates;
5246 if ( inCaps.dwChannels >= 2 ) {
5247 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5248 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5249 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5250 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5251 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5252 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5253 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5254 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5255
5256 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5257 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5258 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5259 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5260 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5261 }
5262 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5263 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5264 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5265 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5266 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5267 }
5268 }
5269 else if ( inCaps.dwChannels == 1 ) {
5270 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5271 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5272 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5273 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5274 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5275 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5276 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5277 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5278
5279 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5280 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5281 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5282 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5283 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5284 }
5285 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5286 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5287 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5288 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5289 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5290 }
5291 }
5292 else info.inputChannels = 0; // technically, this would be an error
5293
5294 input->Release();
5295
5296 if ( info.inputChannels == 0 ) return info;
5297
5298 // Copy the supported rates to the info structure but avoid duplication.
5299 bool found;
5300 for ( unsigned int i=0; i<rates.size(); i++ ) {
5301 found = false;
5302 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5303 if ( rates[i] == info.sampleRates[j] ) {
5304 found = true;
5305 break;
5306 }
5307 }
5308 if ( found == false ) info.sampleRates.push_back( rates[i] );
5309 }
5310 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5311
5312 // If device opens for both playback and capture, we determine the channels.
5313 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5314 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5315
5316 if ( device == 0 ) info.isDefaultInput = true;
5317
5318 // Copy name and return.
5319 info.name = dsDevices[ device ].name;
5320 info.probed = true;
5321 return info;
5322 }
5323
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5324 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5325 unsigned int firstChannel, unsigned int sampleRate,
5326 RtAudioFormat format, unsigned int *bufferSize,
5327 RtAudio::StreamOptions *options )
5328 {
5329 if ( channels + firstChannel > 2 ) {
5330 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5331 return FAILURE;
5332 }
5333
5334 size_t nDevices = dsDevices.size();
5335 if ( nDevices == 0 ) {
5336 // This should not happen because a check is made before this function is called.
5337 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5338 return FAILURE;
5339 }
5340
5341 if ( device >= nDevices ) {
5342 // This should not happen because a check is made before this function is called.
5343 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5344 return FAILURE;
5345 }
5346
5347 if ( mode == OUTPUT ) {
5348 if ( dsDevices[ device ].validId[0] == false ) {
5349 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5350 errorText_ = errorStream_.str();
5351 return FAILURE;
5352 }
5353 }
5354 else { // mode == INPUT
5355 if ( dsDevices[ device ].validId[1] == false ) {
5356 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5357 errorText_ = errorStream_.str();
5358 return FAILURE;
5359 }
5360 }
5361
5362 // According to a note in PortAudio, using GetDesktopWindow()
5363 // instead of GetForegroundWindow() is supposed to avoid problems
5364 // that occur when the application's window is not the foreground
5365 // window. Also, if the application window closes before the
5366 // DirectSound buffer, DirectSound can crash. In the past, I had
5367 // problems when using GetDesktopWindow() but it seems fine now
5368 // (January 2010). I'll leave it commented here.
5369 // HWND hWnd = GetForegroundWindow();
5370 HWND hWnd = GetDesktopWindow();
5371
5372 // Check the numberOfBuffers parameter and limit the lowest value to
5373 // two. This is a judgement call and a value of two is probably too
5374 // low for capture, but it should work for playback.
5375 int nBuffers = 0;
5376 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5377 if ( options && options->numberOfBuffers > 0 ) nBuffers = options->numberOfBuffers;
5378 if ( nBuffers < 2 ) nBuffers = 3;
5379
5380 // Check the lower range of the user-specified buffer size and set
5381 // (arbitrarily) to a lower bound of 32.
5382 if ( *bufferSize < 32 ) *bufferSize = 32;
5383
5384 // Create the wave format structure. The data format setting will
5385 // be determined later.
5386 WAVEFORMATEX waveFormat;
5387 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5388 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5389 waveFormat.nChannels = channels + firstChannel;
5390 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5391
5392 // Determine the device buffer size. By default, we'll use the value
5393 // defined above (32K), but we will grow it to make allowances for
5394 // very large software buffer sizes.
5395 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5396 DWORD dsPointerLeadTime = 0;
5397
5398 void *ohandle = 0, *bhandle = 0;
5399 HRESULT result;
5400 if ( mode == OUTPUT ) {
5401
5402 LPDIRECTSOUND output;
5403 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5404 if ( FAILED( result ) ) {
5405 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5406 errorText_ = errorStream_.str();
5407 return FAILURE;
5408 }
5409
5410 DSCAPS outCaps;
5411 outCaps.dwSize = sizeof( outCaps );
5412 result = output->GetCaps( &outCaps );
5413 if ( FAILED( result ) ) {
5414 output->Release();
5415 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5416 errorText_ = errorStream_.str();
5417 return FAILURE;
5418 }
5419
5420 // Check channel information.
5421 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5422 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5423 errorText_ = errorStream_.str();
5424 return FAILURE;
5425 }
5426
5427 // Check format information. Use 16-bit format unless not
5428 // supported or user requests 8-bit.
5429 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5430 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5431 waveFormat.wBitsPerSample = 16;
5432 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5433 }
5434 else {
5435 waveFormat.wBitsPerSample = 8;
5436 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5437 }
5438 stream_.userFormat = format;
5439
5440 // Update wave format structure and buffer information.
5441 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5442 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5443 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5444
5445 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5446 while ( dsPointerLeadTime * 2U > dsBufferSize )
5447 dsBufferSize *= 2;
5448
5449 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5450 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5451 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5452 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5453 if ( FAILED( result ) ) {
5454 output->Release();
5455 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5456 errorText_ = errorStream_.str();
5457 return FAILURE;
5458 }
5459
5460 // Even though we will write to the secondary buffer, we need to
5461 // access the primary buffer to set the correct output format
5462 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5463 // buffer description.
5464 DSBUFFERDESC bufferDescription;
5465 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5466 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5467 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5468
5469 // Obtain the primary buffer
5470 LPDIRECTSOUNDBUFFER buffer;
5471 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5472 if ( FAILED( result ) ) {
5473 output->Release();
5474 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5475 errorText_ = errorStream_.str();
5476 return FAILURE;
5477 }
5478
5479 // Set the primary DS buffer sound format.
5480 result = buffer->SetFormat( &waveFormat );
5481 if ( FAILED( result ) ) {
5482 output->Release();
5483 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5484 errorText_ = errorStream_.str();
5485 return FAILURE;
5486 }
5487
5488 // Setup the secondary DS buffer description.
5489 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5490 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5491 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5492 DSBCAPS_GLOBALFOCUS |
5493 DSBCAPS_GETCURRENTPOSITION2 |
5494 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5495 bufferDescription.dwBufferBytes = dsBufferSize;
5496 bufferDescription.lpwfxFormat = &waveFormat;
5497
5498 // Try to create the secondary DS buffer. If that doesn't work,
5499 // try to use software mixing. Otherwise, there's a problem.
5500 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5501 if ( FAILED( result ) ) {
5502 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5503 DSBCAPS_GLOBALFOCUS |
5504 DSBCAPS_GETCURRENTPOSITION2 |
5505 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5506 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5507 if ( FAILED( result ) ) {
5508 output->Release();
5509 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5510 errorText_ = errorStream_.str();
5511 return FAILURE;
5512 }
5513 }
5514
5515 // Get the buffer size ... might be different from what we specified.
5516 DSBCAPS dsbcaps;
5517 dsbcaps.dwSize = sizeof( DSBCAPS );
5518 result = buffer->GetCaps( &dsbcaps );
5519 if ( FAILED( result ) ) {
5520 output->Release();
5521 buffer->Release();
5522 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5523 errorText_ = errorStream_.str();
5524 return FAILURE;
5525 }
5526
5527 dsBufferSize = dsbcaps.dwBufferBytes;
5528
5529 // Lock the DS buffer
5530 LPVOID audioPtr;
5531 DWORD dataLen;
5532 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5533 if ( FAILED( result ) ) {
5534 output->Release();
5535 buffer->Release();
5536 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5537 errorText_ = errorStream_.str();
5538 return FAILURE;
5539 }
5540
5541 // Zero the DS buffer
5542 ZeroMemory( audioPtr, dataLen );
5543
5544 // Unlock the DS buffer
5545 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5546 if ( FAILED( result ) ) {
5547 output->Release();
5548 buffer->Release();
5549 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5550 errorText_ = errorStream_.str();
5551 return FAILURE;
5552 }
5553
5554 ohandle = (void *) output;
5555 bhandle = (void *) buffer;
5556 }
5557
5558 if ( mode == INPUT ) {
5559
5560 LPDIRECTSOUNDCAPTURE input;
5561 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5562 if ( FAILED( result ) ) {
5563 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5564 errorText_ = errorStream_.str();
5565 return FAILURE;
5566 }
5567
5568 DSCCAPS inCaps;
5569 inCaps.dwSize = sizeof( inCaps );
5570 result = input->GetCaps( &inCaps );
5571 if ( FAILED( result ) ) {
5572 input->Release();
5573 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5574 errorText_ = errorStream_.str();
5575 return FAILURE;
5576 }
5577
5578 // Check channel information.
5579 if ( inCaps.dwChannels < channels + firstChannel ) {
5580 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5581 return FAILURE;
5582 }
5583
5584 // Check format information. Use 16-bit format unless user
5585 // requests 8-bit.
5586 DWORD deviceFormats;
5587 if ( channels + firstChannel == 2 ) {
5588 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5589 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5590 waveFormat.wBitsPerSample = 8;
5591 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5592 }
5593 else { // assume 16-bit is supported
5594 waveFormat.wBitsPerSample = 16;
5595 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5596 }
5597 }
5598 else { // channel == 1
5599 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5600 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5601 waveFormat.wBitsPerSample = 8;
5602 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5603 }
5604 else { // assume 16-bit is supported
5605 waveFormat.wBitsPerSample = 16;
5606 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5607 }
5608 }
5609 stream_.userFormat = format;
5610
5611 // Update wave format structure and buffer information.
5612 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5613 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5614 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5615
5616 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5617 while ( dsPointerLeadTime * 2U > dsBufferSize )
5618 dsBufferSize *= 2;
5619
5620 // Setup the secondary DS buffer description.
5621 DSCBUFFERDESC bufferDescription;
5622 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5623 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5624 bufferDescription.dwFlags = 0;
5625 bufferDescription.dwReserved = 0;
5626 bufferDescription.dwBufferBytes = dsBufferSize;
5627 bufferDescription.lpwfxFormat = &waveFormat;
5628
5629 // Create the capture buffer.
5630 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5631 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5632 if ( FAILED( result ) ) {
5633 input->Release();
5634 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5635 errorText_ = errorStream_.str();
5636 return FAILURE;
5637 }
5638
5639 // Get the buffer size ... might be different from what we specified.
5640 DSCBCAPS dscbcaps;
5641 dscbcaps.dwSize = sizeof( DSCBCAPS );
5642 result = buffer->GetCaps( &dscbcaps );
5643 if ( FAILED( result ) ) {
5644 input->Release();
5645 buffer->Release();
5646 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5647 errorText_ = errorStream_.str();
5648 return FAILURE;
5649 }
5650
5651 dsBufferSize = dscbcaps.dwBufferBytes;
5652
5653 // NOTE: We could have a problem here if this is a duplex stream
5654 // and the play and capture hardware buffer sizes are different
5655 // (I'm actually not sure if that is a problem or not).
5656 // Currently, we are not verifying that.
5657
5658 // Lock the capture buffer
5659 LPVOID audioPtr;
5660 DWORD dataLen;
5661 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5662 if ( FAILED( result ) ) {
5663 input->Release();
5664 buffer->Release();
5665 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5666 errorText_ = errorStream_.str();
5667 return FAILURE;
5668 }
5669
5670 // Zero the buffer
5671 ZeroMemory( audioPtr, dataLen );
5672
5673 // Unlock the buffer
5674 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5675 if ( FAILED( result ) ) {
5676 input->Release();
5677 buffer->Release();
5678 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5679 errorText_ = errorStream_.str();
5680 return FAILURE;
5681 }
5682
5683 ohandle = (void *) input;
5684 bhandle = (void *) buffer;
5685 }
5686
5687 // Set various stream parameters
5688 DsHandle *handle = 0;
5689 stream_.nDeviceChannels[mode] = channels + firstChannel;
5690 stream_.nUserChannels[mode] = channels;
5691 stream_.bufferSize = *bufferSize;
5692 stream_.channelOffset[mode] = firstChannel;
5693 stream_.deviceInterleaved[mode] = true;
5694 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5695 else stream_.userInterleaved = true;
5696
5697 // Set flag for buffer conversion
5698 stream_.doConvertBuffer[mode] = false;
5699 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5700 stream_.doConvertBuffer[mode] = true;
5701 if (stream_.userFormat != stream_.deviceFormat[mode])
5702 stream_.doConvertBuffer[mode] = true;
5703 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5704 stream_.nUserChannels[mode] > 1 )
5705 stream_.doConvertBuffer[mode] = true;
5706
5707 // Allocate necessary internal buffers
5708 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5709 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5710 if ( stream_.userBuffer[mode] == NULL ) {
5711 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5712 goto error;
5713 }
5714
5715 if ( stream_.doConvertBuffer[mode] ) {
5716
5717 bool makeBuffer = true;
5718 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5719 if ( mode == INPUT ) {
5720 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5721 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5722 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5723 }
5724 }
5725
5726 if ( makeBuffer ) {
5727 bufferBytes *= *bufferSize;
5728 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5729 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5730 if ( stream_.deviceBuffer == NULL ) {
5731 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5732 goto error;
5733 }
5734 }
5735 }
5736
5737 // Allocate our DsHandle structures for the stream.
5738 if ( stream_.apiHandle == 0 ) {
5739 try {
5740 handle = new DsHandle;
5741 }
5742 catch ( std::bad_alloc& ) {
5743 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5744 goto error;
5745 }
5746
5747 // Create a manual-reset event.
5748 handle->condition = CreateEvent( NULL, // no security
5749 TRUE, // manual-reset
5750 FALSE, // non-signaled initially
5751 NULL ); // unnamed
5752 stream_.apiHandle = (void *) handle;
5753 }
5754 else
5755 handle = (DsHandle *) stream_.apiHandle;
5756 handle->id[mode] = ohandle;
5757 handle->buffer[mode] = bhandle;
5758 handle->dsBufferSize[mode] = dsBufferSize;
5759 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5760
5761 stream_.device[mode] = device;
5762 stream_.state = STREAM_STOPPED;
5763 if ( stream_.mode == OUTPUT && mode == INPUT )
5764 // We had already set up an output stream.
5765 stream_.mode = DUPLEX;
5766 else
5767 stream_.mode = mode;
5768 stream_.nBuffers = nBuffers;
5769 stream_.sampleRate = sampleRate;
5770
5771 // Setup the buffer conversion information structure.
5772 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5773
5774 // Setup the callback thread.
5775 if ( stream_.callbackInfo.isRunning == false ) {
5776 unsigned threadId;
5777 stream_.callbackInfo.isRunning = true;
5778 stream_.callbackInfo.object = (void *) this;
5779 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5780 &stream_.callbackInfo, 0, &threadId );
5781 if ( stream_.callbackInfo.thread == 0 ) {
5782 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5783 goto error;
5784 }
5785
5786 // Boost DS thread priority
5787 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5788 }
5789 return SUCCESS;
5790
5791 error:
5792 if ( handle ) {
5793 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5794 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5795 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5796 if ( buffer ) buffer->Release();
5797 object->Release();
5798 }
5799 if ( handle->buffer[1] ) {
5800 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5801 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5802 if ( buffer ) buffer->Release();
5803 object->Release();
5804 }
5805 CloseHandle( handle->condition );
5806 delete handle;
5807 stream_.apiHandle = 0;
5808 }
5809
5810 for ( int i=0; i<2; i++ ) {
5811 if ( stream_.userBuffer[i] ) {
5812 free( stream_.userBuffer[i] );
5813 stream_.userBuffer[i] = 0;
5814 }
5815 }
5816
5817 if ( stream_.deviceBuffer ) {
5818 free( stream_.deviceBuffer );
5819 stream_.deviceBuffer = 0;
5820 }
5821
5822 stream_.state = STREAM_CLOSED;
5823 return FAILURE;
5824 }
5825
closeStream()5826 void RtApiDs :: closeStream()
5827 {
5828 if ( stream_.state == STREAM_CLOSED ) {
5829 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5830 error( RtAudioError::WARNING );
5831 return;
5832 }
5833
5834 // Stop the callback thread.
5835 stream_.callbackInfo.isRunning = false;
5836 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5837 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5838
5839 DsHandle *handle = (DsHandle *) stream_.apiHandle;
5840 if ( handle ) {
5841 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5842 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5843 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5844 if ( buffer ) {
5845 buffer->Stop();
5846 buffer->Release();
5847 }
5848 object->Release();
5849 }
5850 if ( handle->buffer[1] ) {
5851 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5852 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5853 if ( buffer ) {
5854 buffer->Stop();
5855 buffer->Release();
5856 }
5857 object->Release();
5858 }
5859 CloseHandle( handle->condition );
5860 delete handle;
5861 stream_.apiHandle = 0;
5862 }
5863
5864 for ( int i=0; i<2; i++ ) {
5865 if ( stream_.userBuffer[i] ) {
5866 free( stream_.userBuffer[i] );
5867 stream_.userBuffer[i] = 0;
5868 }
5869 }
5870
5871 if ( stream_.deviceBuffer ) {
5872 free( stream_.deviceBuffer );
5873 stream_.deviceBuffer = 0;
5874 }
5875
5876 stream_.mode = UNINITIALIZED;
5877 stream_.state = STREAM_CLOSED;
5878 }
5879
startStream()5880 void RtApiDs :: startStream()
5881 {
5882 verifyStream();
5883 if ( stream_.state == STREAM_RUNNING ) {
5884 errorText_ = "RtApiDs::startStream(): the stream is already running!";
5885 error( RtAudioError::WARNING );
5886 return;
5887 }
5888
5889 DsHandle *handle = (DsHandle *) stream_.apiHandle;
5890
5891 // Increase scheduler frequency on lesser windows (a side-effect of
5892 // increasing timer accuracy). On greater windows (Win2K or later),
5893 // this is already in effect.
5894 timeBeginPeriod( 1 );
5895
5896 buffersRolling = false;
5897 duplexPrerollBytes = 0;
5898
5899 if ( stream_.mode == DUPLEX ) {
5900 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
5901 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
5902 }
5903
5904 HRESULT result = 0;
5905 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
5906
5907 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5908 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
5909 if ( FAILED( result ) ) {
5910 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
5911 errorText_ = errorStream_.str();
5912 goto unlock;
5913 }
5914 }
5915
5916 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
5917
5918 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5919 result = buffer->Start( DSCBSTART_LOOPING );
5920 if ( FAILED( result ) ) {
5921 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
5922 errorText_ = errorStream_.str();
5923 goto unlock;
5924 }
5925 }
5926
5927 handle->drainCounter = 0;
5928 handle->internalDrain = false;
5929 ResetEvent( handle->condition );
5930 stream_.state = STREAM_RUNNING;
5931
5932 unlock:
5933 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
5934 }
5935
// Stop a running stream.  Optionally waits for the output to drain,
// halts the DirectSound buffer(s), and zeroes their contents so a
// subsequent startStream() does not replay stale audio.
// NOTE: locking is asymmetric -- the output branch always takes the
// stream mutex; the input branch takes it only when the output branch
// did not (i.e. mode != DUPLEX).  The unlock label releases it once.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter == 0 means the callback thread is still producing
    // audio; request a drain (counter = 2 skips the user callback) and
    // wait for callbackEvent() to signal that draining has finished.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex is already held from the output branch.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6039
abortStream()6040 void RtApiDs :: abortStream()
6041 {
6042 verifyStream();
6043 if ( stream_.state == STREAM_STOPPED ) {
6044 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6045 error( RtAudioError::WARNING );
6046 return;
6047 }
6048
6049 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6050 handle->drainCounter = 2;
6051
6052 stopStream();
6053 }
6054
callbackEvent()6055 void RtApiDs :: callbackEvent()
6056 {
6057 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6058 Sleep( 50 ); // sleep 50 milliseconds
6059 return;
6060 }
6061
6062 if ( stream_.state == STREAM_CLOSED ) {
6063 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6064 error( RtAudioError::WARNING );
6065 return;
6066 }
6067
6068 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6069 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6070
6071 // Check if we were draining the stream and signal is finished.
6072 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6073
6074 stream_.state = STREAM_STOPPING;
6075 if ( handle->internalDrain == false )
6076 SetEvent( handle->condition );
6077 else
6078 stopStream();
6079 return;
6080 }
6081
6082 // Invoke user callback to get fresh output data UNLESS we are
6083 // draining stream.
6084 if ( handle->drainCounter == 0 ) {
6085 RtAudioCallback callback = (RtAudioCallback) info->callback;
6086 double streamTime = getStreamTime();
6087 RtAudioStreamStatus status = 0;
6088 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6089 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6090 handle->xrun[0] = false;
6091 }
6092 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6093 status |= RTAUDIO_INPUT_OVERFLOW;
6094 handle->xrun[1] = false;
6095 }
6096 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6097 stream_.bufferSize, streamTime, status, info->userData );
6098 if ( cbReturnValue == 2 ) {
6099 stream_.state = STREAM_STOPPING;
6100 handle->drainCounter = 2;
6101 abortStream();
6102 return;
6103 }
6104 else if ( cbReturnValue == 1 ) {
6105 handle->drainCounter = 1;
6106 handle->internalDrain = true;
6107 }
6108 }
6109
6110 HRESULT result;
6111 DWORD currentWritePointer, safeWritePointer;
6112 DWORD currentReadPointer, safeReadPointer;
6113 UINT nextWritePointer;
6114
6115 LPVOID buffer1 = NULL;
6116 LPVOID buffer2 = NULL;
6117 DWORD bufferSize1 = 0;
6118 DWORD bufferSize2 = 0;
6119
6120 char *buffer;
6121 long bufferBytes;
6122
6123 MUTEX_LOCK( &stream_.mutex );
6124 if ( stream_.state == STREAM_STOPPED ) {
6125 MUTEX_UNLOCK( &stream_.mutex );
6126 return;
6127 }
6128
6129 if ( buffersRolling == false ) {
6130 if ( stream_.mode == DUPLEX ) {
6131 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6132
6133 // It takes a while for the devices to get rolling. As a result,
6134 // there's no guarantee that the capture and write device pointers
6135 // will move in lockstep. Wait here for both devices to start
6136 // rolling, and then set our buffer pointers accordingly.
6137 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6138 // bytes later than the write buffer.
6139
6140 // Stub: a serious risk of having a pre-emptive scheduling round
6141 // take place between the two GetCurrentPosition calls... but I'm
6142 // really not sure how to solve the problem. Temporarily boost to
6143 // Realtime priority, maybe; but I'm not sure what priority the
6144 // DirectSound service threads run at. We *should* be roughly
6145 // within a ms or so of correct.
6146
6147 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6148 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6149
6150 DWORD startSafeWritePointer, startSafeReadPointer;
6151
6152 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6153 if ( FAILED( result ) ) {
6154 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6155 errorText_ = errorStream_.str();
6156 MUTEX_UNLOCK( &stream_.mutex );
6157 error( RtAudioError::SYSTEM_ERROR );
6158 return;
6159 }
6160 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6161 if ( FAILED( result ) ) {
6162 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6163 errorText_ = errorStream_.str();
6164 MUTEX_UNLOCK( &stream_.mutex );
6165 error( RtAudioError::SYSTEM_ERROR );
6166 return;
6167 }
6168 while ( true ) {
6169 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6170 if ( FAILED( result ) ) {
6171 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6172 errorText_ = errorStream_.str();
6173 MUTEX_UNLOCK( &stream_.mutex );
6174 error( RtAudioError::SYSTEM_ERROR );
6175 return;
6176 }
6177 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6178 if ( FAILED( result ) ) {
6179 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6180 errorText_ = errorStream_.str();
6181 MUTEX_UNLOCK( &stream_.mutex );
6182 error( RtAudioError::SYSTEM_ERROR );
6183 return;
6184 }
6185 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6186 Sleep( 1 );
6187 }
6188
6189 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6190
6191 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6192 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6193 handle->bufferPointer[1] = safeReadPointer;
6194 }
6195 else if ( stream_.mode == OUTPUT ) {
6196
6197 // Set the proper nextWritePosition after initial startup.
6198 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6199 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6200 if ( FAILED( result ) ) {
6201 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6202 errorText_ = errorStream_.str();
6203 MUTEX_UNLOCK( &stream_.mutex );
6204 error( RtAudioError::SYSTEM_ERROR );
6205 return;
6206 }
6207 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6208 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6209 }
6210
6211 buffersRolling = true;
6212 }
6213
6214 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6215
6216 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6217
6218 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6219 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6220 bufferBytes *= formatBytes( stream_.userFormat );
6221 memset( stream_.userBuffer[0], 0, bufferBytes );
6222 }
6223
6224 // Setup parameters and do buffer conversion if necessary.
6225 if ( stream_.doConvertBuffer[0] ) {
6226 buffer = stream_.deviceBuffer;
6227 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6228 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6229 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6230 }
6231 else {
6232 buffer = stream_.userBuffer[0];
6233 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6234 bufferBytes *= formatBytes( stream_.userFormat );
6235 }
6236
6237 // No byte swapping necessary in DirectSound implementation.
6238
6239 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6240 // unsigned. So, we need to convert our signed 8-bit data here to
6241 // unsigned.
6242 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6243 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6244
6245 DWORD dsBufferSize = handle->dsBufferSize[0];
6246 nextWritePointer = handle->bufferPointer[0];
6247
6248 DWORD endWrite, leadPointer;
6249 while ( true ) {
6250 // Find out where the read and "safe write" pointers are.
6251 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6252 if ( FAILED( result ) ) {
6253 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6254 errorText_ = errorStream_.str();
6255 MUTEX_UNLOCK( &stream_.mutex );
6256 error( RtAudioError::SYSTEM_ERROR );
6257 return;
6258 }
6259
6260 // We will copy our output buffer into the region between
6261 // safeWritePointer and leadPointer. If leadPointer is not
6262 // beyond the next endWrite position, wait until it is.
6263 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6264 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6265 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6266 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6267 endWrite = nextWritePointer + bufferBytes;
6268
6269 // Check whether the entire write region is behind the play pointer.
6270 if ( leadPointer >= endWrite ) break;
6271
6272 // If we are here, then we must wait until the leadPointer advances
6273 // beyond the end of our next write region. We use the
6274 // Sleep() function to suspend operation until that happens.
6275 double millis = ( endWrite - leadPointer ) * 1000.0;
6276 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6277 if ( millis < 1.0 ) millis = 1.0;
6278 Sleep( (DWORD) millis );
6279 }
6280
6281 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6282 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6283 // We've strayed into the forbidden zone ... resync the read pointer.
6284 handle->xrun[0] = true;
6285 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6286 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6287 handle->bufferPointer[0] = nextWritePointer;
6288 endWrite = nextWritePointer + bufferBytes;
6289 }
6290
6291 // Lock free space in the buffer
6292 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6293 &bufferSize1, &buffer2, &bufferSize2, 0 );
6294 if ( FAILED( result ) ) {
6295 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6296 errorText_ = errorStream_.str();
6297 MUTEX_UNLOCK( &stream_.mutex );
6298 error( RtAudioError::SYSTEM_ERROR );
6299 return;
6300 }
6301
6302 // Copy our buffer into the DS buffer
6303 CopyMemory( buffer1, buffer, bufferSize1 );
6304 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6305
6306 // Update our buffer offset and unlock sound buffer
6307 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6308 if ( FAILED( result ) ) {
6309 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6310 errorText_ = errorStream_.str();
6311 MUTEX_UNLOCK( &stream_.mutex );
6312 error( RtAudioError::SYSTEM_ERROR );
6313 return;
6314 }
6315 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6316 handle->bufferPointer[0] = nextWritePointer;
6317 }
6318
6319 // Don't bother draining input
6320 if ( handle->drainCounter ) {
6321 handle->drainCounter++;
6322 goto unlock;
6323 }
6324
6325 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6326
6327 // Setup parameters.
6328 if ( stream_.doConvertBuffer[1] ) {
6329 buffer = stream_.deviceBuffer;
6330 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6331 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6332 }
6333 else {
6334 buffer = stream_.userBuffer[1];
6335 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6336 bufferBytes *= formatBytes( stream_.userFormat );
6337 }
6338
6339 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6340 long nextReadPointer = handle->bufferPointer[1];
6341 DWORD dsBufferSize = handle->dsBufferSize[1];
6342
6343 // Find out where the write and "safe read" pointers are.
6344 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6345 if ( FAILED( result ) ) {
6346 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6347 errorText_ = errorStream_.str();
6348 MUTEX_UNLOCK( &stream_.mutex );
6349 error( RtAudioError::SYSTEM_ERROR );
6350 return;
6351 }
6352
6353 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6354 DWORD endRead = nextReadPointer + bufferBytes;
6355
6356 // Handling depends on whether we are INPUT or DUPLEX.
6357 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6358 // then a wait here will drag the write pointers into the forbidden zone.
6359 //
6360 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6361 // it's in a safe position. This causes dropouts, but it seems to be the only
6362 // practical way to sync up the read and write pointers reliably, given the
6363 // the very complex relationship between phase and increment of the read and write
6364 // pointers.
6365 //
6366 // In order to minimize audible dropouts in DUPLEX mode, we will
6367 // provide a pre-roll period of 0.5 seconds in which we return
6368 // zeros from the read buffer while the pointers sync up.
6369
6370 if ( stream_.mode == DUPLEX ) {
6371 if ( safeReadPointer < endRead ) {
6372 if ( duplexPrerollBytes <= 0 ) {
6373 // Pre-roll time over. Be more agressive.
6374 int adjustment = endRead-safeReadPointer;
6375
6376 handle->xrun[1] = true;
6377 // Two cases:
6378 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6379 // and perform fine adjustments later.
6380 // - small adjustments: back off by twice as much.
6381 if ( adjustment >= 2*bufferBytes )
6382 nextReadPointer = safeReadPointer-2*bufferBytes;
6383 else
6384 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6385
6386 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6387
6388 }
6389 else {
6390 // In pre=roll time. Just do it.
6391 nextReadPointer = safeReadPointer - bufferBytes;
6392 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6393 }
6394 endRead = nextReadPointer + bufferBytes;
6395 }
6396 }
6397 else { // mode == INPUT
6398 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6399 // See comments for playback.
6400 double millis = (endRead - safeReadPointer) * 1000.0;
6401 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6402 if ( millis < 1.0 ) millis = 1.0;
6403 Sleep( (DWORD) millis );
6404
6405 // Wake up and find out where we are now.
6406 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6407 if ( FAILED( result ) ) {
6408 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6409 errorText_ = errorStream_.str();
6410 MUTEX_UNLOCK( &stream_.mutex );
6411 error( RtAudioError::SYSTEM_ERROR );
6412 return;
6413 }
6414
6415 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6416 }
6417 }
6418
6419 // Lock free space in the buffer
6420 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6421 &bufferSize1, &buffer2, &bufferSize2, 0 );
6422 if ( FAILED( result ) ) {
6423 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6424 errorText_ = errorStream_.str();
6425 MUTEX_UNLOCK( &stream_.mutex );
6426 error( RtAudioError::SYSTEM_ERROR );
6427 return;
6428 }
6429
6430 if ( duplexPrerollBytes <= 0 ) {
6431 // Copy our buffer into the DS buffer
6432 CopyMemory( buffer, buffer1, bufferSize1 );
6433 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6434 }
6435 else {
6436 memset( buffer, 0, bufferSize1 );
6437 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6438 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6439 }
6440
6441 // Update our buffer offset and unlock sound buffer
6442 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6443 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6444 if ( FAILED( result ) ) {
6445 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6446 errorText_ = errorStream_.str();
6447 MUTEX_UNLOCK( &stream_.mutex );
6448 error( RtAudioError::SYSTEM_ERROR );
6449 return;
6450 }
6451 handle->bufferPointer[1] = nextReadPointer;
6452
6453 // No byte swapping necessary in DirectSound implementation.
6454
6455 // If necessary, convert 8-bit data from unsigned to signed.
6456 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6457 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6458
6459 // Do buffer conversion if necessary.
6460 if ( stream_.doConvertBuffer[1] )
6461 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6462 }
6463
6464 unlock:
6465 MUTEX_UNLOCK( &stream_.mutex );
6466 RtApi::tickStreamTime();
6467 }
6468
6469 // Definitions for utility functions and callbacks
6470 // specific to the DirectSound implementation.
6471
callbackHandler(void * ptr)6472 static unsigned __stdcall callbackHandler( void *ptr )
6473 {
6474 CallbackInfo *info = (CallbackInfo *) ptr;
6475 RtApiDs *object = (RtApiDs *) info->object;
6476 bool* isRunning = &info->isRunning;
6477
6478 while ( *isRunning == true ) {
6479 object->callbackEvent();
6480 }
6481
6482 _endthreadex( 0 );
6483 return 0;
6484 }
6485
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6486 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6487 LPCTSTR description,
6488 LPCTSTR /*module*/,
6489 LPVOID lpContext )
6490 {
6491 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6492 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6493
6494 HRESULT hr;
6495 bool validDevice = false;
6496 if ( probeInfo.isInput == true ) {
6497 DSCCAPS caps;
6498 LPDIRECTSOUNDCAPTURE object;
6499
6500 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6501 if ( hr != DS_OK ) return TRUE;
6502
6503 caps.dwSize = sizeof(caps);
6504 hr = object->GetCaps( &caps );
6505 if ( hr == DS_OK ) {
6506 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6507 validDevice = true;
6508 }
6509 object->Release();
6510 }
6511 else {
6512 DSCAPS caps;
6513 LPDIRECTSOUND object;
6514 hr = DirectSoundCreate( lpguid, &object, NULL );
6515 if ( hr != DS_OK ) return TRUE;
6516
6517 caps.dwSize = sizeof(caps);
6518 hr = object->GetCaps( &caps );
6519 if ( hr == DS_OK ) {
6520 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6521 validDevice = true;
6522 }
6523 object->Release();
6524 }
6525
6526 // If good device, then save its name and guid.
6527 std::string name = convertCharPointerToStdString( description );
6528 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6529 if ( lpguid == NULL )
6530 name = "Default Device";
6531 if ( validDevice ) {
6532 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6533 if ( dsDevices[i].name == name ) {
6534 dsDevices[i].found = true;
6535 if ( probeInfo.isInput ) {
6536 dsDevices[i].id[1] = lpguid;
6537 dsDevices[i].validId[1] = true;
6538 }
6539 else {
6540 dsDevices[i].id[0] = lpguid;
6541 dsDevices[i].validId[0] = true;
6542 }
6543 return TRUE;
6544 }
6545 }
6546
6547 DsDevice device;
6548 device.name = name;
6549 device.found = true;
6550 if ( probeInfo.isInput ) {
6551 device.id[1] = lpguid;
6552 device.validId[1] = true;
6553 }
6554 else {
6555 device.id[0] = lpguid;
6556 device.validId[0] = true;
6557 }
6558 dsDevices.push_back( device );
6559 }
6560
6561 return TRUE;
6562 }
6563
getErrorString(int code)6564 static const char* getErrorString( int code )
6565 {
6566 switch ( code ) {
6567
6568 case DSERR_ALLOCATED:
6569 return "Already allocated";
6570
6571 case DSERR_CONTROLUNAVAIL:
6572 return "Control unavailable";
6573
6574 case DSERR_INVALIDPARAM:
6575 return "Invalid parameter";
6576
6577 case DSERR_INVALIDCALL:
6578 return "Invalid call";
6579
6580 case DSERR_GENERIC:
6581 return "Generic error";
6582
6583 case DSERR_PRIOLEVELNEEDED:
6584 return "Priority level needed";
6585
6586 case DSERR_OUTOFMEMORY:
6587 return "Out of memory";
6588
6589 case DSERR_BADFORMAT:
6590 return "The sample rate or the channel format is not supported";
6591
6592 case DSERR_UNSUPPORTED:
6593 return "Not supported";
6594
6595 case DSERR_NODRIVER:
6596 return "No driver";
6597
6598 case DSERR_ALREADYINITIALIZED:
6599 return "Already initialized";
6600
6601 case DSERR_NOAGGREGATION:
6602 return "No aggregation";
6603
6604 case DSERR_BUFFERLOST:
6605 return "Buffer lost";
6606
6607 case DSERR_OTHERAPPHASPRIO:
6608 return "Another application already has priority";
6609
6610 case DSERR_UNINITIALIZED:
6611 return "Uninitialized";
6612
6613 default:
6614 return "DirectSound unknown error";
6615 }
6616 }
6617 //******************** End of __WINDOWS_DS__ *********************//
6618 #endif
6619
6620
6621 #if defined(__LINUX_ALSA__)
6622
6623 #include <alsa/asoundlib.h>
6624 #include <unistd.h>
6625
6626 // A structure to hold various information related to the ALSA API
6627 // implementation.
struct AlsaHandle {
  snd_pcm_t *handles[2];      // PCM handles; presumably [0] = playback, [1] = capture, matching the DS handle layout above -- confirm at the ALSA open site
  bool synchronized;          // true once the two directions are linked -- presumably via snd_pcm_link; confirm where it is set
  bool xrun[2];               // under/overrun flags, one per direction
  pthread_cond_t runnable_cv; // condition variable paired with 'runnable' below
  bool runnable;              // flag the condition variable signals on

  // Initialize to a safe idle state: not synchronized, not runnable,
  // no xruns recorded.
  AlsaHandle()
    :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
};
6638
6639 static void *alsaCallbackHandler( void * ptr );
6640
RtApiAlsa :: RtApiAlsa()
{
  // No per-instance setup required; all state is initialized lazily
  // when a stream is probed/opened.
}
6645
~RtApiAlsa()6646 RtApiAlsa :: ~RtApiAlsa()
6647 {
6648 if ( stream_.state != STREAM_CLOSED ) closeStream();
6649 }
6650
getDeviceCount(void)6651 unsigned int RtApiAlsa :: getDeviceCount( void )
6652 {
6653 unsigned nDevices = 0;
6654 int result, subdevice, card;
6655 char name[64];
6656 snd_ctl_t *handle;
6657
6658 // Count cards and devices
6659 card = -1;
6660 snd_card_next( &card );
6661 while ( card >= 0 ) {
6662 sprintf( name, "hw:%d", card );
6663 result = snd_ctl_open( &handle, name, 0 );
6664 if ( result < 0 ) {
6665 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6666 errorText_ = errorStream_.str();
6667 error( RtAudioError::WARNING );
6668 goto nextcard;
6669 }
6670 subdevice = -1;
6671 while( 1 ) {
6672 result = snd_ctl_pcm_next_device( handle, &subdevice );
6673 if ( result < 0 ) {
6674 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6675 errorText_ = errorStream_.str();
6676 error( RtAudioError::WARNING );
6677 break;
6678 }
6679 if ( subdevice < 0 )
6680 break;
6681 nDevices++;
6682 }
6683 nextcard:
6684 if (result == 0) {
6685 snd_ctl_close( handle );
6686 }
6687 snd_card_next( &card );
6688 }
6689
6690 result = snd_ctl_open( &handle, "default", 0 );
6691 if (result == 0) {
6692 nDevices++;
6693 snd_ctl_close( handle );
6694 }
6695
6696 return nDevices;
6697 }
6698
getDeviceInfo(unsigned int device)6699 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6700 {
6701 RtAudio::DeviceInfo info;
6702 info.probed = false;
6703
6704 unsigned nDevices = 0;
6705 int result, subdevice, card;
6706 char name[64];
6707 snd_ctl_t *chandle;
6708
6709 // Count cards and devices
6710 card = -1;
6711 subdevice = -1;
6712 snd_card_next( &card );
6713 while ( card >= 0 ) {
6714 sprintf( name, "hw:%d", card );
6715 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6716 if ( result < 0 ) {
6717 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6718 errorText_ = errorStream_.str();
6719 error( RtAudioError::WARNING );
6720 goto nextcard;
6721 }
6722 subdevice = -1;
6723 while( 1 ) {
6724 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6725 if ( result < 0 ) {
6726 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6727 errorText_ = errorStream_.str();
6728 error( RtAudioError::WARNING );
6729 break;
6730 }
6731 if ( subdevice < 0 ) break;
6732 if ( nDevices == device ) {
6733 sprintf( name, "hw:%d,%d", card, subdevice );
6734 goto foundDevice;
6735 }
6736 nDevices++;
6737 }
6738 nextcard:
6739 snd_ctl_close( chandle );
6740 snd_card_next( &card );
6741 }
6742
6743 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6744 if ( result == 0 ) {
6745 if ( nDevices == device ) {
6746 strcpy( name, "default" );
6747 goto foundDevice;
6748 }
6749 nDevices++;
6750 }
6751
6752 if ( nDevices == 0 ) {
6753 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6754 error( RtAudioError::INVALID_USE );
6755 return info;
6756 }
6757
6758 if ( device >= nDevices ) {
6759 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6760 error( RtAudioError::INVALID_USE );
6761 return info;
6762 }
6763
6764 foundDevice:
6765
6766 // If a stream is already open, we cannot probe the stream devices.
6767 // Thus, use the saved results.
6768 if ( stream_.state != STREAM_CLOSED &&
6769 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6770 snd_ctl_close( chandle );
6771 if ( device >= devices_.size() ) {
6772 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6773 error( RtAudioError::WARNING );
6774 return info;
6775 }
6776 return devices_[ device ];
6777 }
6778
6779 int openMode = SND_PCM_ASYNC;
6780 snd_pcm_stream_t stream;
6781 snd_pcm_info_t *pcminfo;
6782 snd_pcm_info_alloca( &pcminfo );
6783 snd_pcm_t *phandle;
6784 snd_pcm_hw_params_t *params;
6785 snd_pcm_hw_params_alloca( ¶ms );
6786
6787 // First try for playback unless default device (which has subdev -1)
6788 stream = SND_PCM_STREAM_PLAYBACK;
6789 snd_pcm_info_set_stream( pcminfo, stream );
6790 if ( subdevice != -1 ) {
6791 snd_pcm_info_set_device( pcminfo, subdevice );
6792 snd_pcm_info_set_subdevice( pcminfo, 0 );
6793
6794 result = snd_ctl_pcm_info( chandle, pcminfo );
6795 if ( result < 0 ) {
6796 // Device probably doesn't support playback.
6797 goto captureProbe;
6798 }
6799 }
6800
6801 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6802 if ( result < 0 ) {
6803 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6804 errorText_ = errorStream_.str();
6805 error( RtAudioError::WARNING );
6806 goto captureProbe;
6807 }
6808
6809 // The device is open ... fill the parameter structure.
6810 result = snd_pcm_hw_params_any( phandle, params );
6811 if ( result < 0 ) {
6812 snd_pcm_close( phandle );
6813 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6814 errorText_ = errorStream_.str();
6815 error( RtAudioError::WARNING );
6816 goto captureProbe;
6817 }
6818
6819 // Get output channel information.
6820 unsigned int value;
6821 result = snd_pcm_hw_params_get_channels_max( params, &value );
6822 if ( result < 0 ) {
6823 snd_pcm_close( phandle );
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6825 errorText_ = errorStream_.str();
6826 error( RtAudioError::WARNING );
6827 goto captureProbe;
6828 }
6829 info.outputChannels = value;
6830 snd_pcm_close( phandle );
6831
6832 captureProbe:
6833 stream = SND_PCM_STREAM_CAPTURE;
6834 snd_pcm_info_set_stream( pcminfo, stream );
6835
6836 // Now try for capture unless default device (with subdev = -1)
6837 if ( subdevice != -1 ) {
6838 result = snd_ctl_pcm_info( chandle, pcminfo );
6839 snd_ctl_close( chandle );
6840 if ( result < 0 ) {
6841 // Device probably doesn't support capture.
6842 if ( info.outputChannels == 0 ) return info;
6843 goto probeParameters;
6844 }
6845 }
6846 else
6847 snd_ctl_close( chandle );
6848
6849 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6850 if ( result < 0 ) {
6851 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6852 errorText_ = errorStream_.str();
6853 error( RtAudioError::WARNING );
6854 if ( info.outputChannels == 0 ) return info;
6855 goto probeParameters;
6856 }
6857
6858 // The device is open ... fill the parameter structure.
6859 result = snd_pcm_hw_params_any( phandle, params );
6860 if ( result < 0 ) {
6861 snd_pcm_close( phandle );
6862 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6863 errorText_ = errorStream_.str();
6864 error( RtAudioError::WARNING );
6865 if ( info.outputChannels == 0 ) return info;
6866 goto probeParameters;
6867 }
6868
6869 result = snd_pcm_hw_params_get_channels_max( params, &value );
6870 if ( result < 0 ) {
6871 snd_pcm_close( phandle );
6872 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6873 errorText_ = errorStream_.str();
6874 error( RtAudioError::WARNING );
6875 if ( info.outputChannels == 0 ) return info;
6876 goto probeParameters;
6877 }
6878 info.inputChannels = value;
6879 snd_pcm_close( phandle );
6880
6881 // If device opens for both playback and capture, we determine the channels.
6882 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6883 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
6884
6885 // ALSA doesn't provide default devices so we'll use the first available one.
6886 if ( device == 0 && info.outputChannels > 0 )
6887 info.isDefaultOutput = true;
6888 if ( device == 0 && info.inputChannels > 0 )
6889 info.isDefaultInput = true;
6890
6891 probeParameters:
6892 // At this point, we just need to figure out the supported data
6893 // formats and sample rates. We'll proceed by opening the device in
6894 // the direction with the maximum number of channels, or playback if
6895 // they are equal. This might limit our sample rate options, but so
6896 // be it.
6897
6898 if ( info.outputChannels >= info.inputChannels )
6899 stream = SND_PCM_STREAM_PLAYBACK;
6900 else
6901 stream = SND_PCM_STREAM_CAPTURE;
6902 snd_pcm_info_set_stream( pcminfo, stream );
6903
6904 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6905 if ( result < 0 ) {
6906 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6907 errorText_ = errorStream_.str();
6908 error( RtAudioError::WARNING );
6909 return info;
6910 }
6911
6912 // The device is open ... fill the parameter structure.
6913 result = snd_pcm_hw_params_any( phandle, params );
6914 if ( result < 0 ) {
6915 snd_pcm_close( phandle );
6916 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6917 errorText_ = errorStream_.str();
6918 error( RtAudioError::WARNING );
6919 return info;
6920 }
6921
6922 // Test our discrete set of sample rate values.
6923 info.sampleRates.clear();
6924 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
6925 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
6926 info.sampleRates.push_back( SAMPLE_RATES[i] );
6927
6928 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
6929 info.preferredSampleRate = SAMPLE_RATES[i];
6930 }
6931 }
6932 if ( info.sampleRates.size() == 0 ) {
6933 snd_pcm_close( phandle );
6934 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
6935 errorText_ = errorStream_.str();
6936 error( RtAudioError::WARNING );
6937 return info;
6938 }
6939
6940 // Probe the supported data formats ... we don't care about endian-ness just yet
6941 snd_pcm_format_t format;
6942 info.nativeFormats = 0;
6943 format = SND_PCM_FORMAT_S8;
6944 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6945 info.nativeFormats |= RTAUDIO_SINT8;
6946 format = SND_PCM_FORMAT_S16;
6947 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6948 info.nativeFormats |= RTAUDIO_SINT16;
6949 format = SND_PCM_FORMAT_S24;
6950 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6951 info.nativeFormats |= RTAUDIO_SINT24;
6952 format = SND_PCM_FORMAT_S32;
6953 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6954 info.nativeFormats |= RTAUDIO_SINT32;
6955 format = SND_PCM_FORMAT_FLOAT;
6956 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6957 info.nativeFormats |= RTAUDIO_FLOAT32;
6958 format = SND_PCM_FORMAT_FLOAT64;
6959 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
6960 info.nativeFormats |= RTAUDIO_FLOAT64;
6961
6962 // Check that we have at least one supported format
6963 if ( info.nativeFormats == 0 ) {
6964 snd_pcm_close( phandle );
6965 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
6966 errorText_ = errorStream_.str();
6967 error( RtAudioError::WARNING );
6968 return info;
6969 }
6970
6971 // Get the device name
6972 char *cardname;
6973 result = snd_card_get_name( card, &cardname );
6974 if ( result >= 0 ) {
6975 sprintf( name, "hw:%s,%d", cardname, subdevice );
6976 free( cardname );
6977 }
6978 info.name = name;
6979
6980 // That's all ... close the device and return
6981 snd_pcm_close( phandle );
6982 info.probed = true;
6983 return info;
6984 }
6985
saveDeviceInfo(void)6986 void RtApiAlsa :: saveDeviceInfo( void )
6987 {
6988 devices_.clear();
6989
6990 unsigned int nDevices = getDeviceCount();
6991 devices_.resize( nDevices );
6992 for ( unsigned int i=0; i<nDevices; i++ )
6993 devices_[i] = getDeviceInfo( i );
6994 }
6995
// Open and configure one direction (OUTPUT or INPUT) of an ALSA stream.
//
// Resolves the RtAudio device index to an ALSA "hw:card,device" name (or
// "default"), opens the pcm, negotiates access mode, sample format, sample
// rate, channel count, period size and period count, installs the hardware
// and software parameter sets, allocates the AlsaHandle and any required
// conversion buffers, and spawns the callback thread for the first
// direction opened.  Returns SUCCESS or FAILURE; on failure errorText_ is
// set and partially-acquired resources are released via the error: label.
bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )

{
#if defined(__RTAUDIO_DEBUG__)
  snd_output_t *out;
  snd_output_stdio_attach(&out, stderr, 0);
#endif

  // I'm not using the "plug" interface ... too much inconsistent behavior.

  unsigned nDevices = 0;
  int result, subdevice, card;
  char name[64];
  snd_ctl_t *chandle;

  if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
    snprintf(name, sizeof(name), "%s", "default");
  else {
    // Count cards and devices, walking hardware cards in order; the
    // enumeration order here must match getDeviceCount()/getDeviceInfo().
    card = -1;
    snd_card_next( &card );
    while ( card >= 0 ) {
      sprintf( name, "hw:%d", card );
      result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
      subdevice = -1;
      while( 1 ) {
        result = snd_ctl_pcm_next_device( chandle, &subdevice );
        if ( result < 0 ) break;
        if ( subdevice < 0 ) break;
        if ( nDevices == device ) {
          // Found the requested device: record its hw name and stop scanning.
          sprintf( name, "hw:%d,%d", card, subdevice );
          snd_ctl_close( chandle );
          goto foundDevice;
        }
        nDevices++;
      }
      snd_ctl_close( chandle );
      snd_card_next( &card );
    }

    // The "default" device is appended to the list after all hw devices.
    // NOTE(review): chandle opened here is not closed on either path —
    // looks like a control-handle leak; confirm against upstream.
    result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
    if ( result == 0 ) {
      if ( nDevices == device ) {
        strcpy( name, "default" );
        goto foundDevice;
      }
      nDevices++;
    }

    if ( nDevices == 0 ) {
      // This should not happen because a check is made before this function is called.
      errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
      return FAILURE;
    }

    if ( device >= nDevices ) {
      // This should not happen because a check is made before this function is called.
      errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
      return FAILURE;
    }
  }

 foundDevice:

  // The getDeviceInfo() function will not work for a device that is
  // already open.  Thus, we'll probe the system before opening a
  // stream and save the results for use by getDeviceInfo().
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
    this->saveDeviceInfo();

  snd_pcm_stream_t stream;
  if ( mode == OUTPUT )
    stream = SND_PCM_STREAM_PLAYBACK;
  else
    stream = SND_PCM_STREAM_CAPTURE;

  snd_pcm_t *phandle;
  int openMode = SND_PCM_ASYNC;
  result = snd_pcm_open( &phandle, name, stream, openMode );
  if ( result < 0 ) {
    if ( mode == OUTPUT )
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
    else
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Fill the parameter structure.
  snd_pcm_hw_params_t *hw_params;
  snd_pcm_hw_params_alloca( &hw_params );
  result = snd_pcm_hw_params_any( phandle, hw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
  snd_pcm_hw_params_dump( hw_params, out );
#endif

  // Set access ... check user preference.  If the preferred access mode
  // fails, fall back to the other one and record what the device actually
  // uses; a conversion buffer is set up later if user and device differ.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
    stream_.userInterleaved = false;
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
    if ( result < 0 ) {
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
      stream_.deviceInterleaved[mode] = true;
    }
    else
      stream_.deviceInterleaved[mode] = false;
  }
  else {
    stream_.userInterleaved = true;
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
    if ( result < 0 ) {
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
      stream_.deviceInterleaved[mode] = false;
    }
    else
      stream_.deviceInterleaved[mode] = true;
  }

  if ( result < 0 ) {
    // Neither access mode was accepted.
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine how to set the device format.
  stream_.userFormat = format;
  snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;

  if ( format == RTAUDIO_SINT8 )
    deviceFormat = SND_PCM_FORMAT_S8;
  else if ( format == RTAUDIO_SINT16 )
    deviceFormat = SND_PCM_FORMAT_S16;
  else if ( format == RTAUDIO_SINT24 )
    deviceFormat = SND_PCM_FORMAT_S24;
  else if ( format == RTAUDIO_SINT32 )
    deviceFormat = SND_PCM_FORMAT_S32;
  else if ( format == RTAUDIO_FLOAT32 )
    deviceFormat = SND_PCM_FORMAT_FLOAT;
  else if ( format == RTAUDIO_FLOAT64 )
    deviceFormat = SND_PCM_FORMAT_FLOAT64;

  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
    stream_.deviceFormat[mode] = format;
    goto setFormat;
  }

  // The user requested format is not natively supported by the device.
  // Try the alternatives from widest to narrowest so the least precision
  // is lost in conversion.
  deviceFormat = SND_PCM_FORMAT_FLOAT64;
  if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_FLOAT;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S32;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S24;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S16;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    goto setFormat;
  }

  deviceFormat = SND_PCM_FORMAT_S8;
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    goto setFormat;
  }

  // If we get here, no supported format was found.
  snd_pcm_close( phandle );
  errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
  errorText_ = errorStream_.str();
  return FAILURE;

 setFormat:
  result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine whether byte-swaping is necessary.  Single-byte samples
  // (S8) never need swapping.
  stream_.doByteSwap[mode] = false;
  if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
    result = snd_pcm_format_cpu_endian( deviceFormat );
    if ( result == 0 )
      stream_.doByteSwap[mode] = true;
    else if (result < 0) {
      snd_pcm_close( phandle );
      errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Set the sample rate.  set_rate_near may adjust sampleRate to the
  // closest rate the device supports.
  result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Determine the number of channels for this device.  We support a possible
  // minimum device channel number > than the value requested by the user.
  stream_.nUserChannels[mode] = channels;
  unsigned int value;
  result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
  unsigned int deviceChannels = value;
  if ( result < 0 || deviceChannels < channels + firstChannel ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  deviceChannels = value;
  if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
  stream_.nDeviceChannels[mode] = deviceChannels;

  // Set the device channels.
  result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the buffer (or period) size.  The device may adjust the requested
  // size; the granted value is written back through *bufferSize.
  int dir = 0;
  snd_pcm_uframes_t periodSize = *bufferSize;
  result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  *bufferSize = periodSize;

  // Set the buffer number, which in ALSA is referred to as the "period".
  unsigned int periods = 0;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
  if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
  if ( periods < 2 ) periods = 4; // a fairly safe default value
  result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  stream_.bufferSize = *bufferSize;

  // Install the hardware configuration
  result = snd_pcm_hw_params( phandle, hw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
  snd_pcm_hw_params_dump( hw_params, out );
#endif

  // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
  snd_pcm_sw_params_t *sw_params = NULL;
  snd_pcm_sw_params_alloca( &sw_params );
  snd_pcm_sw_params_current( phandle, sw_params );
  snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
  snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
  snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );

  // The following two settings were suggested by Theo Veenker
  //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
  //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );

  // here are two options for a fix
  //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
  snd_pcm_uframes_t val;
  snd_pcm_sw_params_get_boundary( sw_params, &val );
  snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );

  result = snd_pcm_sw_params( phandle, sw_params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

#if defined(__RTAUDIO_DEBUG__)
  fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
  snd_pcm_sw_params_dump( sw_params, out );
#endif

  // Set flags for buffer conversion: needed when format, channel count,
  // or interleaving differs between the user side and the device side.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate the ApiHandle if necessary and then save.
  AlsaHandle *apiInfo = 0;
  if ( stream_.apiHandle == 0 ) {
    try {
      apiInfo = (AlsaHandle *) new AlsaHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
      goto error;
    }

    if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
      // NOTE(review): on this path the error label below calls
      // pthread_cond_destroy on a cv that was never initialized —
      // confirm whether this needs a guard.
      errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }

    stream_.apiHandle = (void *) apiInfo;
    apiInfo->handles[0] = 0;
    apiInfo->handles[1] = 0;
  }
  else {
    apiInfo = (AlsaHandle *) stream_.apiHandle;
  }
  apiInfo->handles[mode] = phandle;
  // Ownership of the pcm handle has passed to apiInfo; clear the local so
  // the error path does not double-close it.
  phandle = 0;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // A device buffer may already exist from the OUTPUT setup; reuse it
      // if it is large enough for the input side too.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.sampleRate = sampleRate;
  stream_.nBuffers = periods;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup thread if necessary.
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    // Link the streams if possible.
    apiInfo->synchronized = false;
    if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
      apiInfo->synchronized = true;
    else {
      errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
      error( RtAudioError::WARNING );
    }
  }
  else {
    stream_.mode = mode;

    // Setup callback thread.
    stream_.callbackInfo.object = (void *) this;

    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional).  The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      // We previously attempted to increase the audio callback priority
      // to SCHED_RR here via the attributes.  However, while no errors
      // were reported in doing so, it did not work.  So, now this is
      // done in the alsaCallbackHandler function.
      stream_.callbackInfo.doRealtime = true;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      stream_.callbackInfo.priority = priority;
    }
#endif

    stream_.callbackInfo.isRunning = true;
    result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
    pthread_attr_destroy( &attr );
    if ( result ) {
      stream_.callbackInfo.isRunning = false;
      errorText_ = "RtApiAlsa::error creating callback thread!";
      goto error;
    }
  }

  return SUCCESS;

 error:
  // Common cleanup: release the api handle, any still-local pcm handle,
  // and all allocated buffers, then mark the stream closed.
  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  if ( phandle) snd_pcm_close( phandle );

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
7506
closeStream()7507 void RtApiAlsa :: closeStream()
7508 {
7509 if ( stream_.state == STREAM_CLOSED ) {
7510 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7511 error( RtAudioError::WARNING );
7512 return;
7513 }
7514
7515 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7516 stream_.callbackInfo.isRunning = false;
7517 MUTEX_LOCK( &stream_.mutex );
7518 if ( stream_.state == STREAM_STOPPED ) {
7519 apiInfo->runnable = true;
7520 pthread_cond_signal( &apiInfo->runnable_cv );
7521 }
7522 MUTEX_UNLOCK( &stream_.mutex );
7523 pthread_join( stream_.callbackInfo.thread, NULL );
7524
7525 if ( stream_.state == STREAM_RUNNING ) {
7526 stream_.state = STREAM_STOPPED;
7527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7528 snd_pcm_drop( apiInfo->handles[0] );
7529 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7530 snd_pcm_drop( apiInfo->handles[1] );
7531 }
7532
7533 if ( apiInfo ) {
7534 pthread_cond_destroy( &apiInfo->runnable_cv );
7535 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7536 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7537 delete apiInfo;
7538 stream_.apiHandle = 0;
7539 }
7540
7541 for ( int i=0; i<2; i++ ) {
7542 if ( stream_.userBuffer[i] ) {
7543 free( stream_.userBuffer[i] );
7544 stream_.userBuffer[i] = 0;
7545 }
7546 }
7547
7548 if ( stream_.deviceBuffer ) {
7549 free( stream_.deviceBuffer );
7550 stream_.deviceBuffer = 0;
7551 }
7552
7553 stream_.mode = UNINITIALIZED;
7554 stream_.state = STREAM_CLOSED;
7555 }
7556
startStream()7557 void RtApiAlsa :: startStream()
7558 {
7559 // This method calls snd_pcm_prepare if the device isn't already in that state.
7560
7561 verifyStream();
7562 if ( stream_.state == STREAM_RUNNING ) {
7563 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7564 error( RtAudioError::WARNING );
7565 return;
7566 }
7567
7568 MUTEX_LOCK( &stream_.mutex );
7569
7570 int result = 0;
7571 snd_pcm_state_t state;
7572 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7573 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7574 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7575 state = snd_pcm_state( handle[0] );
7576 if ( state != SND_PCM_STATE_PREPARED ) {
7577 result = snd_pcm_prepare( handle[0] );
7578 if ( result < 0 ) {
7579 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7580 errorText_ = errorStream_.str();
7581 goto unlock;
7582 }
7583 }
7584 }
7585
7586 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7587 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7588 state = snd_pcm_state( handle[1] );
7589 if ( state != SND_PCM_STATE_PREPARED ) {
7590 result = snd_pcm_prepare( handle[1] );
7591 if ( result < 0 ) {
7592 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7593 errorText_ = errorStream_.str();
7594 goto unlock;
7595 }
7596 }
7597 }
7598
7599 stream_.state = STREAM_RUNNING;
7600
7601 unlock:
7602 apiInfo->runnable = true;
7603 pthread_cond_signal( &apiInfo->runnable_cv );
7604 MUTEX_UNLOCK( &stream_.mutex );
7605
7606 if ( result >= 0 ) return;
7607 error( RtAudioError::SYSTEM_ERROR );
7608 }
7609
stopStream()7610 void RtApiAlsa :: stopStream()
7611 {
7612 verifyStream();
7613 if ( stream_.state == STREAM_STOPPED ) {
7614 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7615 error( RtAudioError::WARNING );
7616 return;
7617 }
7618
7619 stream_.state = STREAM_STOPPED;
7620 MUTEX_LOCK( &stream_.mutex );
7621
7622 int result = 0;
7623 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7624 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7625 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7626 if ( apiInfo->synchronized )
7627 result = snd_pcm_drop( handle[0] );
7628 else
7629 result = snd_pcm_drain( handle[0] );
7630 if ( result < 0 ) {
7631 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7632 errorText_ = errorStream_.str();
7633 goto unlock;
7634 }
7635 }
7636
7637 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7638 result = snd_pcm_drop( handle[1] );
7639 if ( result < 0 ) {
7640 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7641 errorText_ = errorStream_.str();
7642 goto unlock;
7643 }
7644 }
7645
7646 unlock:
7647 apiInfo->runnable = false; // fixes high CPU usage when stopped
7648 MUTEX_UNLOCK( &stream_.mutex );
7649
7650 if ( result >= 0 ) return;
7651 error( RtAudioError::SYSTEM_ERROR );
7652 }
7653
abortStream()7654 void RtApiAlsa :: abortStream()
7655 {
7656 verifyStream();
7657 if ( stream_.state == STREAM_STOPPED ) {
7658 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7659 error( RtAudioError::WARNING );
7660 return;
7661 }
7662
7663 stream_.state = STREAM_STOPPED;
7664 MUTEX_LOCK( &stream_.mutex );
7665
7666 int result = 0;
7667 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7668 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7669 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7670 result = snd_pcm_drop( handle[0] );
7671 if ( result < 0 ) {
7672 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7673 errorText_ = errorStream_.str();
7674 goto unlock;
7675 }
7676 }
7677
7678 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7679 result = snd_pcm_drop( handle[1] );
7680 if ( result < 0 ) {
7681 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7682 errorText_ = errorStream_.str();
7683 goto unlock;
7684 }
7685 }
7686
7687 unlock:
7688 apiInfo->runnable = false; // fixes high CPU usage when stopped
7689 MUTEX_UNLOCK( &stream_.mutex );
7690
7691 if ( result >= 0 ) return;
7692 error( RtAudioError::SYSTEM_ERROR );
7693 }
7694
// Service one buffer period: block while the stream is stopped, invoke the
// user callback, then perform this period's pcm read and/or write with
// over/underrun (xrun) recovery.
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream() sets runnable and
    // signals us (see the unlock label in startStream()).
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Build the status flags from any xruns recorded during a previous period,
  // clear them, then hand the user buffers to the callback.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort (no draining).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when conversion is
    // needed, directly into the user buffer otherwise.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: one pointer per channel into the staging buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Still attempt the output half of a duplex stream after a read error.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: one pointer per channel into the staging buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it, then re-prepare the device for the next period.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // A callback return value of 1 requests a normal (draining) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
7882
alsaCallbackHandler(void * ptr)7883 static void *alsaCallbackHandler( void *ptr )
7884 {
7885 CallbackInfo *info = (CallbackInfo *) ptr;
7886 RtApiAlsa *object = (RtApiAlsa *) info->object;
7887 bool *isRunning = &info->isRunning;
7888
7889 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7890 if ( info->doRealtime ) {
7891 pthread_t tID = pthread_self(); // ID of this thread
7892 sched_param prio = { info->priority }; // scheduling priority of thread
7893 pthread_setschedparam( tID, SCHED_RR, &prio );
7894 }
7895 #endif
7896
7897 while ( *isRunning == true ) {
7898 pthread_testcancel();
7899 object->callbackEvent();
7900 }
7901
7902 pthread_exit( NULL );
7903
7904 return NULL;
7905 }
7906
7907 //******************** End of __LINUX_ALSA__ *********************//
7908 #endif
7909
7910 #if defined(__UNIX_PULSE__)
7911
7912 // Code written by Peter Meerwald, pmeerw@pmeerw.net
7913 // and Tristan Matthews.
7914
7915 #include <pulse/error.h>
7916 #include <pulse/simple.h>
7917 #include <cstdio>
7918
// Sample rates the PulseAudio backend reports as supported (zero-terminated).
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Associates an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats handled natively, terminated by a {0, PA_SAMPLE_INVALID} sentinel.
// Any other format is converted internally to 32-bit float (see
// probeDeviceOpen()).
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
7932
// Per-stream state for the PulseAudio backend: one simple-API connection per
// direction plus the callback thread and its wake-up condition variable.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (NULL when output unused)
  pa_simple *s_rec;           // record connection (NULL when input unused)
  pthread_t thread;           // callback thread servicing the stream
  pthread_cond_t runnable_cv; // signaled when the stream may run again
  bool runnable;              // guarded by the stream mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
7941
~RtApiPulse()7942 RtApiPulse::~RtApiPulse()
7943 {
7944 if ( stream_.state != STREAM_CLOSED )
7945 closeStream();
7946 }
7947
getDeviceCount(void)7948 unsigned int RtApiPulse::getDeviceCount( void )
7949 {
7950 return 1;
7951 }
7952
getDeviceInfo(unsigned int)7953 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
7954 {
7955 RtAudio::DeviceInfo info;
7956 info.probed = true;
7957 info.name = "PulseAudio";
7958 info.outputChannels = 2;
7959 info.inputChannels = 2;
7960 info.duplexChannels = 2;
7961 info.isDefaultOutput = true;
7962 info.isDefaultInput = true;
7963
7964 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
7965 info.sampleRates.push_back( *sr );
7966
7967 info.preferredSampleRate = 48000;
7968 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
7969
7970 return info;
7971 }
7972
pulseaudio_callback(void * user)7973 static void *pulseaudio_callback( void * user )
7974 {
7975 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
7976 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
7977 volatile bool *isRunning = &cbi->isRunning;
7978
7979 while ( *isRunning ) {
7980 pthread_testcancel();
7981 context->callbackEvent();
7982 }
7983
7984 pthread_exit( NULL );
7985
7986 return NULL;
7987 }
7988
closeStream(void)7989 void RtApiPulse::closeStream( void )
7990 {
7991 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
7992
7993 stream_.callbackInfo.isRunning = false;
7994 if ( pah ) {
7995 MUTEX_LOCK( &stream_.mutex );
7996 if ( stream_.state == STREAM_STOPPED ) {
7997 pah->runnable = true;
7998 pthread_cond_signal( &pah->runnable_cv );
7999 }
8000 MUTEX_UNLOCK( &stream_.mutex );
8001
8002 pthread_join( pah->thread, 0 );
8003 if ( pah->s_play ) {
8004 pa_simple_flush( pah->s_play, NULL );
8005 pa_simple_free( pah->s_play );
8006 }
8007 if ( pah->s_rec )
8008 pa_simple_free( pah->s_rec );
8009
8010 pthread_cond_destroy( &pah->runnable_cv );
8011 delete pah;
8012 stream_.apiHandle = 0;
8013 }
8014
8015 if ( stream_.userBuffer[0] ) {
8016 free( stream_.userBuffer[0] );
8017 stream_.userBuffer[0] = 0;
8018 }
8019 if ( stream_.userBuffer[1] ) {
8020 free( stream_.userBuffer[1] );
8021 stream_.userBuffer[1] = 0;
8022 }
8023
8024 stream_.state = STREAM_CLOSED;
8025 stream_.mode = UNINITIALIZED;
8026 }
8027
// Service one buffer period for the PulseAudio backend: wait while the
// stream is stopped, run the user callback, then push/pull audio through the
// blocking pa_simple connections (which provide the pacing).
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream() (or closeStream())
    // sets runnable and signals us.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke the user callback; no over/underflow flags are reported by this
  // backend, so status stays 0.
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort (flush).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Select the staging buffers: the device buffer when format/channel
  // conversion is needed, the user buffer otherwise.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // The state may have changed while acquiring the mutex.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write; a failure is reported as a warning, not a fatal error.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read into the staging buffer, then convert to the user format
    // if required.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // A callback return value of 1 requests a normal (draining) stop.
  if ( doStopStream == 1 )
    stopStream();
}
8119
startStream(void)8120 void RtApiPulse::startStream( void )
8121 {
8122 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8123
8124 if ( stream_.state == STREAM_CLOSED ) {
8125 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8126 error( RtAudioError::INVALID_USE );
8127 return;
8128 }
8129 if ( stream_.state == STREAM_RUNNING ) {
8130 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8131 error( RtAudioError::WARNING );
8132 return;
8133 }
8134
8135 MUTEX_LOCK( &stream_.mutex );
8136
8137 stream_.state = STREAM_RUNNING;
8138
8139 pah->runnable = true;
8140 pthread_cond_signal( &pah->runnable_cv );
8141 MUTEX_UNLOCK( &stream_.mutex );
8142 }
8143
// Stop the stream after draining pending playback data to the server.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Mark stopped before taking the lock so the callback thread parks itself
  // on its next pass.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Block until queued playback samples have been played out.
    if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex ); // release before raising the error
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8177
// Stop the stream immediately, discarding (flushing) any queued playback
// data rather than draining it.
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Mark stopped before taking the lock so the callback thread parks itself
  // on its next pass.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Discard everything still queued on the playback connection.
    if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex ); // release before raising the error
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8211
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8212 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8213 unsigned int channels, unsigned int firstChannel,
8214 unsigned int sampleRate, RtAudioFormat format,
8215 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8216 {
8217 PulseAudioHandle *pah = 0;
8218 unsigned long bufferBytes = 0;
8219 pa_sample_spec ss;
8220
8221 if ( device != 0 ) return false;
8222 if ( mode != INPUT && mode != OUTPUT ) return false;
8223 if ( channels != 1 && channels != 2 ) {
8224 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8225 return false;
8226 }
8227 ss.channels = channels;
8228
8229 if ( firstChannel != 0 ) return false;
8230
8231 bool sr_found = false;
8232 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8233 if ( sampleRate == *sr ) {
8234 sr_found = true;
8235 stream_.sampleRate = sampleRate;
8236 ss.rate = sampleRate;
8237 break;
8238 }
8239 }
8240 if ( !sr_found ) {
8241 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8242 return false;
8243 }
8244
8245 bool sf_found = 0;
8246 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8247 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8248 if ( format == sf->rtaudio_format ) {
8249 sf_found = true;
8250 stream_.userFormat = sf->rtaudio_format;
8251 stream_.deviceFormat[mode] = stream_.userFormat;
8252 ss.format = sf->pa_format;
8253 break;
8254 }
8255 }
8256 if ( !sf_found ) { // Use internal data format conversion.
8257 stream_.userFormat = format;
8258 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8259 ss.format = PA_SAMPLE_FLOAT32LE;
8260 }
8261
8262 // Set other stream parameters.
8263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8264 else stream_.userInterleaved = true;
8265 stream_.deviceInterleaved[mode] = true;
8266 stream_.nBuffers = 1;
8267 stream_.doByteSwap[mode] = false;
8268 stream_.nUserChannels[mode] = channels;
8269 stream_.nDeviceChannels[mode] = channels + firstChannel;
8270 stream_.channelOffset[mode] = 0;
8271 std::string streamName = "RtAudio";
8272
8273 // Set flags for buffer conversion.
8274 stream_.doConvertBuffer[mode] = false;
8275 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8276 stream_.doConvertBuffer[mode] = true;
8277 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8278 stream_.doConvertBuffer[mode] = true;
8279
8280 // Allocate necessary internal buffers.
8281 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8282 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8283 if ( stream_.userBuffer[mode] == NULL ) {
8284 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8285 goto error;
8286 }
8287 stream_.bufferSize = *bufferSize;
8288
8289 if ( stream_.doConvertBuffer[mode] ) {
8290
8291 bool makeBuffer = true;
8292 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8293 if ( mode == INPUT ) {
8294 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8295 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8296 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8297 }
8298 }
8299
8300 if ( makeBuffer ) {
8301 bufferBytes *= *bufferSize;
8302 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8303 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8304 if ( stream_.deviceBuffer == NULL ) {
8305 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8306 goto error;
8307 }
8308 }
8309 }
8310
8311 stream_.device[mode] = device;
8312
8313 // Setup the buffer conversion information structure.
8314 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8315
8316 if ( !stream_.apiHandle ) {
8317 PulseAudioHandle *pah = new PulseAudioHandle;
8318 if ( !pah ) {
8319 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8320 goto error;
8321 }
8322
8323 stream_.apiHandle = pah;
8324 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8325 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8326 goto error;
8327 }
8328 }
8329 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8330
8331 int error;
8332 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8333 switch ( mode ) {
8334 case INPUT:
8335 pa_buffer_attr buffer_attr;
8336 buffer_attr.fragsize = bufferBytes;
8337 buffer_attr.maxlength = -1;
8338
8339 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8340 if ( !pah->s_rec ) {
8341 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8342 goto error;
8343 }
8344 break;
8345 case OUTPUT:
8346 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8347 if ( !pah->s_play ) {
8348 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8349 goto error;
8350 }
8351 break;
8352 default:
8353 goto error;
8354 }
8355
8356 if ( stream_.mode == UNINITIALIZED )
8357 stream_.mode = mode;
8358 else if ( stream_.mode == mode )
8359 goto error;
8360 else
8361 stream_.mode = DUPLEX;
8362
8363 if ( !stream_.callbackInfo.isRunning ) {
8364 stream_.callbackInfo.object = this;
8365 stream_.callbackInfo.isRunning = true;
8366 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8367 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8368 goto error;
8369 }
8370 }
8371
8372 stream_.state = STREAM_STOPPED;
8373 return true;
8374
8375 error:
8376 if ( pah && stream_.callbackInfo.isRunning ) {
8377 pthread_cond_destroy( &pah->runnable_cv );
8378 delete pah;
8379 stream_.apiHandle = 0;
8380 }
8381
8382 for ( int i=0; i<2; i++ ) {
8383 if ( stream_.userBuffer[i] ) {
8384 free( stream_.userBuffer[i] );
8385 stream_.userBuffer[i] = 0;
8386 }
8387 }
8388
8389 if ( stream_.deviceBuffer ) {
8390 free( stream_.deviceBuffer );
8391 stream_.deviceBuffer = 0;
8392 }
8393
8394 return FAILURE;
8395 }
8396
8397 //******************** End of __UNIX_PULSE__ *********************//
8398 #endif
8399
8400 #if defined(__LINUX_OSS__)
8401
8402 #include <unistd.h>
8403 #include <sys/ioctl.h>
8404 #include <unistd.h>
8405 #include <fcntl.h>
8406 #include <sys/soundcard.h>
8407 #include <errno.h>
8408 #include <math.h>
8409
8410 static void *ossCallbackHandler(void * ptr);
8411
8412 // A structure to hold various information related to the OSS API
8413 // implementation.
struct OssHandle {
  int id[2]; // device ids ([0] = output, [1] = input; closed via close())
  bool xrun[2];            // per-direction over/underrun flags
  bool triggered;          // NOTE(review): appears to track whether the device
                           // trigger has been issued — confirm in start/stop code
  pthread_cond_t runnable; // condition variable for the callback thread
                           // (used by stream start/stop code outside this view)

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8423
RtApiOss()8424 RtApiOss :: RtApiOss()
8425 {
8426 // Nothing to do here.
8427 }
8428
~RtApiOss()8429 RtApiOss :: ~RtApiOss()
8430 {
8431 if ( stream_.state != STREAM_CLOSED ) closeStream();
8432 }
8433
getDeviceCount(void)8434 unsigned int RtApiOss :: getDeviceCount( void )
8435 {
8436 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8437 if ( mixerfd == -1 ) {
8438 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8439 error( RtAudioError::WARNING );
8440 return 0;
8441 }
8442
8443 oss_sysinfo sysinfo;
8444 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8445 close( mixerfd );
8446 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8447 error( RtAudioError::WARNING );
8448 return 0;
8449 }
8450
8451 close( mixerfd );
8452 return sysinfo.numaudios;
8453 }
8454
getDeviceInfo(unsigned int device)8455 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8456 {
8457 RtAudio::DeviceInfo info;
8458 info.probed = false;
8459
8460 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8461 if ( mixerfd == -1 ) {
8462 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8463 error( RtAudioError::WARNING );
8464 return info;
8465 }
8466
8467 oss_sysinfo sysinfo;
8468 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8469 if ( result == -1 ) {
8470 close( mixerfd );
8471 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8472 error( RtAudioError::WARNING );
8473 return info;
8474 }
8475
8476 unsigned nDevices = sysinfo.numaudios;
8477 if ( nDevices == 0 ) {
8478 close( mixerfd );
8479 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8480 error( RtAudioError::INVALID_USE );
8481 return info;
8482 }
8483
8484 if ( device >= nDevices ) {
8485 close( mixerfd );
8486 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8487 error( RtAudioError::INVALID_USE );
8488 return info;
8489 }
8490
8491 oss_audioinfo ainfo;
8492 ainfo.dev = device;
8493 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8494 close( mixerfd );
8495 if ( result == -1 ) {
8496 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8497 errorText_ = errorStream_.str();
8498 error( RtAudioError::WARNING );
8499 return info;
8500 }
8501
8502 // Probe channels
8503 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8504 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8505 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8506 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8507 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8508 }
8509
8510 // Probe data formats ... do for input
8511 unsigned long mask = ainfo.iformats;
8512 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8513 info.nativeFormats |= RTAUDIO_SINT16;
8514 if ( mask & AFMT_S8 )
8515 info.nativeFormats |= RTAUDIO_SINT8;
8516 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8517 info.nativeFormats |= RTAUDIO_SINT32;
8518 #ifdef AFMT_FLOAT
8519 if ( mask & AFMT_FLOAT )
8520 info.nativeFormats |= RTAUDIO_FLOAT32;
8521 #endif
8522 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8523 info.nativeFormats |= RTAUDIO_SINT24;
8524
8525 // Check that we have at least one supported format
8526 if ( info.nativeFormats == 0 ) {
8527 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8528 errorText_ = errorStream_.str();
8529 error( RtAudioError::WARNING );
8530 return info;
8531 }
8532
8533 // Probe the supported sample rates.
8534 info.sampleRates.clear();
8535 if ( ainfo.nrates ) {
8536 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8537 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8538 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8539 info.sampleRates.push_back( SAMPLE_RATES[k] );
8540
8541 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8542 info.preferredSampleRate = SAMPLE_RATES[k];
8543
8544 break;
8545 }
8546 }
8547 }
8548 }
8549 else {
8550 // Check min and max rate values;
8551 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8552 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8553 info.sampleRates.push_back( SAMPLE_RATES[k] );
8554
8555 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8556 info.preferredSampleRate = SAMPLE_RATES[k];
8557 }
8558 }
8559 }
8560
8561 if ( info.sampleRates.size() == 0 ) {
8562 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8563 errorText_ = errorStream_.str();
8564 error( RtAudioError::WARNING );
8565 }
8566 else {
8567 info.probed = true;
8568 info.name = ainfo.name;
8569 }
8570
8571 return info;
8572 }
8573
8574
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8575 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8576 unsigned int firstChannel, unsigned int sampleRate,
8577 RtAudioFormat format, unsigned int *bufferSize,
8578 RtAudio::StreamOptions *options )
8579 {
8580 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8581 if ( mixerfd == -1 ) {
8582 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8583 return FAILURE;
8584 }
8585
8586 oss_sysinfo sysinfo;
8587 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8588 if ( result == -1 ) {
8589 close( mixerfd );
8590 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8591 return FAILURE;
8592 }
8593
8594 unsigned nDevices = sysinfo.numaudios;
8595 if ( nDevices == 0 ) {
8596 // This should not happen because a check is made before this function is called.
8597 close( mixerfd );
8598 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8599 return FAILURE;
8600 }
8601
8602 if ( device >= nDevices ) {
8603 // This should not happen because a check is made before this function is called.
8604 close( mixerfd );
8605 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8606 return FAILURE;
8607 }
8608
8609 oss_audioinfo ainfo;
8610 ainfo.dev = device;
8611 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8612 close( mixerfd );
8613 if ( result == -1 ) {
8614 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8615 errorText_ = errorStream_.str();
8616 return FAILURE;
8617 }
8618
8619 // Check if device supports input or output
8620 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8621 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8622 if ( mode == OUTPUT )
8623 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8624 else
8625 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8626 errorText_ = errorStream_.str();
8627 return FAILURE;
8628 }
8629
8630 int flags = 0;
8631 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8632 if ( mode == OUTPUT )
8633 flags |= O_WRONLY;
8634 else { // mode == INPUT
8635 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8636 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8637 close( handle->id[0] );
8638 handle->id[0] = 0;
8639 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8640 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8641 errorText_ = errorStream_.str();
8642 return FAILURE;
8643 }
8644 // Check that the number previously set channels is the same.
8645 if ( stream_.nUserChannels[0] != channels ) {
8646 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8647 errorText_ = errorStream_.str();
8648 return FAILURE;
8649 }
8650 flags |= O_RDWR;
8651 }
8652 else
8653 flags |= O_RDONLY;
8654 }
8655
8656 // Set exclusive access if specified.
8657 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8658
8659 // Try to open the device.
8660 int fd;
8661 fd = open( ainfo.devnode, flags, 0 );
8662 if ( fd == -1 ) {
8663 if ( errno == EBUSY )
8664 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8665 else
8666 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8667 errorText_ = errorStream_.str();
8668 return FAILURE;
8669 }
8670
8671 // For duplex operation, specifically set this mode (this doesn't seem to work).
8672 /*
8673 if ( flags | O_RDWR ) {
8674 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8675 if ( result == -1) {
8676 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8677 errorText_ = errorStream_.str();
8678 return FAILURE;
8679 }
8680 }
8681 */
8682
8683 // Check the device channel support.
8684 stream_.nUserChannels[mode] = channels;
8685 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8686 close( fd );
8687 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8688 errorText_ = errorStream_.str();
8689 return FAILURE;
8690 }
8691
8692 // Set the number of channels.
8693 int deviceChannels = channels + firstChannel;
8694 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8695 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8696 close( fd );
8697 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8698 errorText_ = errorStream_.str();
8699 return FAILURE;
8700 }
8701 stream_.nDeviceChannels[mode] = deviceChannels;
8702
8703 // Get the data format mask
8704 int mask;
8705 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8706 if ( result == -1 ) {
8707 close( fd );
8708 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8709 errorText_ = errorStream_.str();
8710 return FAILURE;
8711 }
8712
8713 // Determine how to set the device format.
8714 stream_.userFormat = format;
8715 int deviceFormat = -1;
8716 stream_.doByteSwap[mode] = false;
8717 if ( format == RTAUDIO_SINT8 ) {
8718 if ( mask & AFMT_S8 ) {
8719 deviceFormat = AFMT_S8;
8720 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8721 }
8722 }
8723 else if ( format == RTAUDIO_SINT16 ) {
8724 if ( mask & AFMT_S16_NE ) {
8725 deviceFormat = AFMT_S16_NE;
8726 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8727 }
8728 else if ( mask & AFMT_S16_OE ) {
8729 deviceFormat = AFMT_S16_OE;
8730 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8731 stream_.doByteSwap[mode] = true;
8732 }
8733 }
8734 else if ( format == RTAUDIO_SINT24 ) {
8735 if ( mask & AFMT_S24_NE ) {
8736 deviceFormat = AFMT_S24_NE;
8737 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8738 }
8739 else if ( mask & AFMT_S24_OE ) {
8740 deviceFormat = AFMT_S24_OE;
8741 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8742 stream_.doByteSwap[mode] = true;
8743 }
8744 }
8745 else if ( format == RTAUDIO_SINT32 ) {
8746 if ( mask & AFMT_S32_NE ) {
8747 deviceFormat = AFMT_S32_NE;
8748 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8749 }
8750 else if ( mask & AFMT_S32_OE ) {
8751 deviceFormat = AFMT_S32_OE;
8752 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8753 stream_.doByteSwap[mode] = true;
8754 }
8755 }
8756
8757 if ( deviceFormat == -1 ) {
8758 // The user requested format is not natively supported by the device.
8759 if ( mask & AFMT_S16_NE ) {
8760 deviceFormat = AFMT_S16_NE;
8761 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8762 }
8763 else if ( mask & AFMT_S32_NE ) {
8764 deviceFormat = AFMT_S32_NE;
8765 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8766 }
8767 else if ( mask & AFMT_S24_NE ) {
8768 deviceFormat = AFMT_S24_NE;
8769 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8770 }
8771 else if ( mask & AFMT_S16_OE ) {
8772 deviceFormat = AFMT_S16_OE;
8773 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8774 stream_.doByteSwap[mode] = true;
8775 }
8776 else if ( mask & AFMT_S32_OE ) {
8777 deviceFormat = AFMT_S32_OE;
8778 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8779 stream_.doByteSwap[mode] = true;
8780 }
8781 else if ( mask & AFMT_S24_OE ) {
8782 deviceFormat = AFMT_S24_OE;
8783 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8784 stream_.doByteSwap[mode] = true;
8785 }
8786 else if ( mask & AFMT_S8) {
8787 deviceFormat = AFMT_S8;
8788 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8789 }
8790 }
8791
8792 if ( stream_.deviceFormat[mode] == 0 ) {
8793 // This really shouldn't happen ...
8794 close( fd );
8795 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8796 errorText_ = errorStream_.str();
8797 return FAILURE;
8798 }
8799
8800 // Set the data format.
8801 int temp = deviceFormat;
8802 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8803 if ( result == -1 || deviceFormat != temp ) {
8804 close( fd );
8805 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8806 errorText_ = errorStream_.str();
8807 return FAILURE;
8808 }
8809
8810 // Attempt to set the buffer size. According to OSS, the minimum
8811 // number of buffers is two. The supposed minimum buffer size is 16
8812 // bytes, so that will be our lower bound. The argument to this
8813 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8814 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8815 // We'll check the actual value used near the end of the setup
8816 // procedure.
8817 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8818 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8819 int buffers = 0;
8820 if ( options ) buffers = options->numberOfBuffers;
8821 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8822 if ( buffers < 2 ) buffers = 3;
8823 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8824 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8825 if ( result == -1 ) {
8826 close( fd );
8827 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8828 errorText_ = errorStream_.str();
8829 return FAILURE;
8830 }
8831 stream_.nBuffers = buffers;
8832
8833 // Save buffer size (in sample frames).
8834 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8835 stream_.bufferSize = *bufferSize;
8836
8837 // Set the sample rate.
8838 int srate = sampleRate;
8839 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8840 if ( result == -1 ) {
8841 close( fd );
8842 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8843 errorText_ = errorStream_.str();
8844 return FAILURE;
8845 }
8846
8847 // Verify the sample rate setup worked.
8848 if ( abs( srate - (int)sampleRate ) > 100 ) {
8849 close( fd );
8850 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8851 errorText_ = errorStream_.str();
8852 return FAILURE;
8853 }
8854 stream_.sampleRate = sampleRate;
8855
8856 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8857 // We're doing duplex setup here.
8858 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8859 stream_.nDeviceChannels[0] = deviceChannels;
8860 }
8861
8862 // Set interleaving parameters.
8863 stream_.userInterleaved = true;
8864 stream_.deviceInterleaved[mode] = true;
8865 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8866 stream_.userInterleaved = false;
8867
8868 // Set flags for buffer conversion
8869 stream_.doConvertBuffer[mode] = false;
8870 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8871 stream_.doConvertBuffer[mode] = true;
8872 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8873 stream_.doConvertBuffer[mode] = true;
8874 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8875 stream_.nUserChannels[mode] > 1 )
8876 stream_.doConvertBuffer[mode] = true;
8877
8878 // Allocate the stream handles if necessary and then save.
8879 if ( stream_.apiHandle == 0 ) {
8880 try {
8881 handle = new OssHandle;
8882 }
8883 catch ( std::bad_alloc& ) {
8884 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8885 goto error;
8886 }
8887
8888 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8889 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8890 goto error;
8891 }
8892
8893 stream_.apiHandle = (void *) handle;
8894 }
8895 else {
8896 handle = (OssHandle *) stream_.apiHandle;
8897 }
8898 handle->id[mode] = fd;
8899
8900 // Allocate necessary internal buffers.
8901 unsigned long bufferBytes;
8902 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8903 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8904 if ( stream_.userBuffer[mode] == NULL ) {
8905 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
8906 goto error;
8907 }
8908
8909 if ( stream_.doConvertBuffer[mode] ) {
8910
8911 bool makeBuffer = true;
8912 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8913 if ( mode == INPUT ) {
8914 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8915 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8916 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8917 }
8918 }
8919
8920 if ( makeBuffer ) {
8921 bufferBytes *= *bufferSize;
8922 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8923 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8924 if ( stream_.deviceBuffer == NULL ) {
8925 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
8926 goto error;
8927 }
8928 }
8929 }
8930
8931 stream_.device[mode] = device;
8932 stream_.state = STREAM_STOPPED;
8933
8934 // Setup the buffer conversion information structure.
8935 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8936
8937 // Setup thread if necessary.
8938 if ( stream_.mode == OUTPUT && mode == INPUT ) {
8939 // We had already set up an output stream.
8940 stream_.mode = DUPLEX;
8941 if ( stream_.device[0] == device ) handle->id[0] = fd;
8942 }
8943 else {
8944 stream_.mode = mode;
8945
8946 // Setup callback thread.
8947 stream_.callbackInfo.object = (void *) this;
8948
8949 // Set the thread attributes for joinable and realtime scheduling
8950 // priority. The higher priority will only take affect if the
8951 // program is run as root or suid.
8952 pthread_attr_t attr;
8953 pthread_attr_init( &attr );
8954 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8955 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8956 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8957 struct sched_param param;
8958 int priority = options->priority;
8959 int min = sched_get_priority_min( SCHED_RR );
8960 int max = sched_get_priority_max( SCHED_RR );
8961 if ( priority < min ) priority = min;
8962 else if ( priority > max ) priority = max;
8963 param.sched_priority = priority;
8964 pthread_attr_setschedparam( &attr, ¶m );
8965 pthread_attr_setschedpolicy( &attr, SCHED_RR );
8966 }
8967 else
8968 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8969 #else
8970 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8971 #endif
8972
8973 stream_.callbackInfo.isRunning = true;
8974 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
8975 pthread_attr_destroy( &attr );
8976 if ( result ) {
8977 stream_.callbackInfo.isRunning = false;
8978 errorText_ = "RtApiOss::error creating callback thread!";
8979 goto error;
8980 }
8981 }
8982
8983 return SUCCESS;
8984
8985 error:
8986 if ( handle ) {
8987 pthread_cond_destroy( &handle->runnable );
8988 if ( handle->id[0] ) close( handle->id[0] );
8989 if ( handle->id[1] ) close( handle->id[1] );
8990 delete handle;
8991 stream_.apiHandle = 0;
8992 }
8993
8994 for ( int i=0; i<2; i++ ) {
8995 if ( stream_.userBuffer[i] ) {
8996 free( stream_.userBuffer[i] );
8997 stream_.userBuffer[i] = 0;
8998 }
8999 }
9000
9001 if ( stream_.deviceBuffer ) {
9002 free( stream_.deviceBuffer );
9003 stream_.deviceBuffer = 0;
9004 }
9005
9006 return FAILURE;
9007 }
9008
void RtApiOss :: closeStream()
{
  // Tear down an open stream: stop the callback thread, halt any
  // running device i/o, then release the API handle and all internal
  // buffers.  Safe to call on a stopped or running stream; warns if
  // no stream is open.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): handle is dereferenced below before the null check
  // further down — presumably apiHandle is always non-null once a
  // stream is open; verify against probeDeviceOpen().
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false;  // tell the callback loop to exit
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );  // wake a thread parked in callbackEvent()
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    // Halt whichever device descriptor is actively streaming.
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );  // playback descriptor
    if ( handle->id[1] ) close( handle->id[1] );  // record descriptor
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9056
startStream()9057 void RtApiOss :: startStream()
9058 {
9059 verifyStream();
9060 if ( stream_.state == STREAM_RUNNING ) {
9061 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9062 error( RtAudioError::WARNING );
9063 return;
9064 }
9065
9066 MUTEX_LOCK( &stream_.mutex );
9067
9068 stream_.state = STREAM_RUNNING;
9069
9070 // No need to do anything else here ... OSS automatically starts
9071 // when fed samples.
9072
9073 MUTEX_UNLOCK( &stream_.mutex );
9074
9075 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9076 pthread_cond_signal( &handle->runnable );
9077 }
9078
void RtApiOss :: stopStream()
{
  // Gracefully stop a running stream: the output queue is padded with
  // zeros so buffered audio plays out before the device is halted
  // (contrast with abortStream(), which discards pending samples).
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Use whichever buffer feeds the device directly (converted device
    // buffer or raw user buffer).
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Write nBuffers + 1 periods of silence so queued audio drains.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false;  // duplex re-trigger needed on next start
  }

  // Halt the input descriptor too, unless duplex shares one descriptor.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9150
abortStream()9151 void RtApiOss :: abortStream()
9152 {
9153 verifyStream();
9154 if ( stream_.state == STREAM_STOPPED ) {
9155 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9156 error( RtAudioError::WARNING );
9157 return;
9158 }
9159
9160 MUTEX_LOCK( &stream_.mutex );
9161
9162 // The state might change while waiting on a mutex.
9163 if ( stream_.state == STREAM_STOPPED ) {
9164 MUTEX_UNLOCK( &stream_.mutex );
9165 return;
9166 }
9167
9168 int result = 0;
9169 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9170 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9171 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9172 if ( result == -1 ) {
9173 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9174 errorText_ = errorStream_.str();
9175 goto unlock;
9176 }
9177 handle->triggered = false;
9178 }
9179
9180 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9181 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9182 if ( result == -1 ) {
9183 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9184 errorText_ = errorStream_.str();
9185 goto unlock;
9186 }
9187 }
9188
9189 unlock:
9190 stream_.state = STREAM_STOPPED;
9191 MUTEX_UNLOCK( &stream_.mutex );
9192
9193 if ( result != -1 ) return;
9194 error( RtAudioError::SYSTEM_ERROR );
9195 }
9196
void RtApiOss :: callbackEvent()
{
  // One period of stream processing, run repeatedly by the callback
  // thread: wait while stopped, invoke the user callback for fresh
  // data, then write output to and/or read input from the device.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    // Park here until startStream() (or closeStream()) signals us.
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report and clear any xrun flagged since the previous period.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex period: prime the output with triggering disabled,
      // then enable input and output together so both directions start
      // in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // Callback return value 1 requests a drained (graceful) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
9328
ossCallbackHandler(void * ptr)9329 static void *ossCallbackHandler( void *ptr )
9330 {
9331 CallbackInfo *info = (CallbackInfo *) ptr;
9332 RtApiOss *object = (RtApiOss *) info->object;
9333 bool *isRunning = &info->isRunning;
9334
9335 while ( *isRunning == true ) {
9336 pthread_testcancel();
9337 object->callbackEvent();
9338 }
9339
9340 pthread_exit( NULL );
9341
9342 return NULL;
9343 }
9344
9345 //******************** End of __LINUX_OSS__ *********************//
9346 #endif
9347
9348
9349 // *************************************************** //
9350 //
9351 // Protected common (OS-independent) RtAudio methods.
9352 //
9353 // *************************************************** //
9354
9355 // This method can be modified to control the behavior of error
9356 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatcher.  With a user error callback registered,
  // the message is delivered there (aborting the stream first for
  // fatal errors).  Otherwise warnings may print to stderr and fatal
  // errors throw RtAudioError.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Re-entrancy guard: abortStream() below may call error() again.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message before abortStream() can overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9386
verifyStream()9387 void RtApi :: verifyStream()
9388 {
9389 if ( stream_.state == STREAM_CLOSED ) {
9390 errorText_ = "RtApi:: a stream is not open!";
9391 error( RtAudioError::INVALID_USE );
9392 }
9393 }
9394
clearStreamInfo()9395 void RtApi :: clearStreamInfo()
9396 {
9397 stream_.mode = UNINITIALIZED;
9398 stream_.state = STREAM_CLOSED;
9399 stream_.sampleRate = 0;
9400 stream_.bufferSize = 0;
9401 stream_.nBuffers = 0;
9402 stream_.userFormat = 0;
9403 stream_.userInterleaved = true;
9404 stream_.streamTime = 0.0;
9405 stream_.apiHandle = 0;
9406 stream_.deviceBuffer = 0;
9407 stream_.callbackInfo.callback = 0;
9408 stream_.callbackInfo.userData = 0;
9409 stream_.callbackInfo.isRunning = false;
9410 stream_.callbackInfo.errorCallback = 0;
9411 for ( int i=0; i<2; i++ ) {
9412 stream_.device[i] = 11111;
9413 stream_.doConvertBuffer[i] = false;
9414 stream_.deviceInterleaved[i] = true;
9415 stream_.doByteSwap[i] = false;
9416 stream_.nUserChannels[i] = 0;
9417 stream_.nDeviceChannels[i] = 0;
9418 stream_.channelOffset[i] = 0;
9419 stream_.deviceFormat[i] = 0;
9420 stream_.latency[i] = 0;
9421 stream_.userBuffer[i] = 0;
9422 stream_.convertInfo[i].channels = 0;
9423 stream_.convertInfo[i].inJump = 0;
9424 stream_.convertInfo[i].outJump = 0;
9425 stream_.convertInfo[i].inFormat = 0;
9426 stream_.convertInfo[i].outFormat = 0;
9427 stream_.convertInfo[i].inOffset.clear();
9428 stream_.convertInfo[i].outOffset.clear();
9429 }
9430 }
9431
formatBytes(RtAudioFormat format)9432 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9433 {
9434 if ( format == RTAUDIO_SINT16 )
9435 return 2;
9436 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9437 return 4;
9438 else if ( format == RTAUDIO_FLOAT64 )
9439 return 8;
9440 else if ( format == RTAUDIO_SINT24 )
9441 return 3;
9442 else if ( format == RTAUDIO_SINT8 )
9443 return 1;
9444
9445 errorText_ = "RtApi::formatBytes: undefined format.";
9446 error( RtAudioError::WARNING );
9447
9448 return 0;
9449 }
9450
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Fill in stream_.convertInfo[mode] for conversion between the user
  // and device buffers: sample formats, per-frame jump sizes, channel
  // count, and per-channel offsets implementing (de)interleaving and
  // the firstChannel shift.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides provide.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source to interleaved destination: source
      // channels sit bufferSize samples apart, destination samples are
      // adjacent within a frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source to non-interleaved destination (mirror case).
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides non-interleaved: channel blocks on both sides.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      // Interleaved device buffer: shift each offset by firstChannel
      // samples within the frame.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      // Non-interleaved device buffer: shift by whole channel blocks.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
9530
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)9531 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9532 {
9533 // This function does format conversion, input/output channel compensation, and
9534 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9535 // the lower three bytes of a 32-bit integer.
9536
9537 // Clear our device buffer when in/out duplex device channels are different
9538 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9539 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9540 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9541
9542 int j;
9543 if (info.outFormat == RTAUDIO_FLOAT64) {
9544 Float64 scale;
9545 Float64 *out = (Float64 *)outBuffer;
9546
9547 if (info.inFormat == RTAUDIO_SINT8) {
9548 signed char *in = (signed char *)inBuffer;
9549 scale = 1.0 / 127.5;
9550 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9551 for (j=0; j<info.channels; j++) {
9552 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9553 out[info.outOffset[j]] += 0.5;
9554 out[info.outOffset[j]] *= scale;
9555 }
9556 in += info.inJump;
9557 out += info.outJump;
9558 }
9559 }
9560 else if (info.inFormat == RTAUDIO_SINT16) {
9561 Int16 *in = (Int16 *)inBuffer;
9562 scale = 1.0 / 32767.5;
9563 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9564 for (j=0; j<info.channels; j++) {
9565 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9566 out[info.outOffset[j]] += 0.5;
9567 out[info.outOffset[j]] *= scale;
9568 }
9569 in += info.inJump;
9570 out += info.outJump;
9571 }
9572 }
9573 else if (info.inFormat == RTAUDIO_SINT24) {
9574 Int24 *in = (Int24 *)inBuffer;
9575 scale = 1.0 / 8388607.5;
9576 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9577 for (j=0; j<info.channels; j++) {
9578 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9579 out[info.outOffset[j]] += 0.5;
9580 out[info.outOffset[j]] *= scale;
9581 }
9582 in += info.inJump;
9583 out += info.outJump;
9584 }
9585 }
9586 else if (info.inFormat == RTAUDIO_SINT32) {
9587 Int32 *in = (Int32 *)inBuffer;
9588 scale = 1.0 / 2147483647.5;
9589 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9590 for (j=0; j<info.channels; j++) {
9591 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9592 out[info.outOffset[j]] += 0.5;
9593 out[info.outOffset[j]] *= scale;
9594 }
9595 in += info.inJump;
9596 out += info.outJump;
9597 }
9598 }
9599 else if (info.inFormat == RTAUDIO_FLOAT32) {
9600 Float32 *in = (Float32 *)inBuffer;
9601 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9602 for (j=0; j<info.channels; j++) {
9603 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9604 }
9605 in += info.inJump;
9606 out += info.outJump;
9607 }
9608 }
9609 else if (info.inFormat == RTAUDIO_FLOAT64) {
9610 // Channel compensation and/or (de)interleaving only.
9611 Float64 *in = (Float64 *)inBuffer;
9612 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9613 for (j=0; j<info.channels; j++) {
9614 out[info.outOffset[j]] = in[info.inOffset[j]];
9615 }
9616 in += info.inJump;
9617 out += info.outJump;
9618 }
9619 }
9620 }
9621 else if (info.outFormat == RTAUDIO_FLOAT32) {
9622 Float32 scale;
9623 Float32 *out = (Float32 *)outBuffer;
9624
9625 if (info.inFormat == RTAUDIO_SINT8) {
9626 signed char *in = (signed char *)inBuffer;
9627 scale = (Float32) ( 1.0 / 127.5 );
9628 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9629 for (j=0; j<info.channels; j++) {
9630 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9631 out[info.outOffset[j]] += 0.5;
9632 out[info.outOffset[j]] *= scale;
9633 }
9634 in += info.inJump;
9635 out += info.outJump;
9636 }
9637 }
9638 else if (info.inFormat == RTAUDIO_SINT16) {
9639 Int16 *in = (Int16 *)inBuffer;
9640 scale = (Float32) ( 1.0 / 32767.5 );
9641 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9642 for (j=0; j<info.channels; j++) {
9643 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9644 out[info.outOffset[j]] += 0.5;
9645 out[info.outOffset[j]] *= scale;
9646 }
9647 in += info.inJump;
9648 out += info.outJump;
9649 }
9650 }
9651 else if (info.inFormat == RTAUDIO_SINT24) {
9652 Int24 *in = (Int24 *)inBuffer;
9653 scale = (Float32) ( 1.0 / 8388607.5 );
9654 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9655 for (j=0; j<info.channels; j++) {
9656 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9657 out[info.outOffset[j]] += 0.5;
9658 out[info.outOffset[j]] *= scale;
9659 }
9660 in += info.inJump;
9661 out += info.outJump;
9662 }
9663 }
9664 else if (info.inFormat == RTAUDIO_SINT32) {
9665 Int32 *in = (Int32 *)inBuffer;
9666 scale = (Float32) ( 1.0 / 2147483647.5 );
9667 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9668 for (j=0; j<info.channels; j++) {
9669 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9670 out[info.outOffset[j]] += 0.5;
9671 out[info.outOffset[j]] *= scale;
9672 }
9673 in += info.inJump;
9674 out += info.outJump;
9675 }
9676 }
9677 else if (info.inFormat == RTAUDIO_FLOAT32) {
9678 // Channel compensation and/or (de)interleaving only.
9679 Float32 *in = (Float32 *)inBuffer;
9680 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9681 for (j=0; j<info.channels; j++) {
9682 out[info.outOffset[j]] = in[info.inOffset[j]];
9683 }
9684 in += info.inJump;
9685 out += info.outJump;
9686 }
9687 }
9688 else if (info.inFormat == RTAUDIO_FLOAT64) {
9689 Float64 *in = (Float64 *)inBuffer;
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9691 for (j=0; j<info.channels; j++) {
9692 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9693 }
9694 in += info.inJump;
9695 out += info.outJump;
9696 }
9697 }
9698 }
9699 else if (info.outFormat == RTAUDIO_SINT32) {
9700 Int32 *out = (Int32 *)outBuffer;
9701 if (info.inFormat == RTAUDIO_SINT8) {
9702 signed char *in = (signed char *)inBuffer;
9703 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9704 for (j=0; j<info.channels; j++) {
9705 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9706 out[info.outOffset[j]] <<= 24;
9707 }
9708 in += info.inJump;
9709 out += info.outJump;
9710 }
9711 }
9712 else if (info.inFormat == RTAUDIO_SINT16) {
9713 Int16 *in = (Int16 *)inBuffer;
9714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9715 for (j=0; j<info.channels; j++) {
9716 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9717 out[info.outOffset[j]] <<= 16;
9718 }
9719 in += info.inJump;
9720 out += info.outJump;
9721 }
9722 }
9723 else if (info.inFormat == RTAUDIO_SINT24) {
9724 Int24 *in = (Int24 *)inBuffer;
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9726 for (j=0; j<info.channels; j++) {
9727 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9728 out[info.outOffset[j]] <<= 8;
9729 }
9730 in += info.inJump;
9731 out += info.outJump;
9732 }
9733 }
9734 else if (info.inFormat == RTAUDIO_SINT32) {
9735 // Channel compensation and/or (de)interleaving only.
9736 Int32 *in = (Int32 *)inBuffer;
9737 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9738 for (j=0; j<info.channels; j++) {
9739 out[info.outOffset[j]] = in[info.inOffset[j]];
9740 }
9741 in += info.inJump;
9742 out += info.outJump;
9743 }
9744 }
9745 else if (info.inFormat == RTAUDIO_FLOAT32) {
9746 Float32 *in = (Float32 *)inBuffer;
9747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9748 for (j=0; j<info.channels; j++) {
9749 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9750 }
9751 in += info.inJump;
9752 out += info.outJump;
9753 }
9754 }
9755 else if (info.inFormat == RTAUDIO_FLOAT64) {
9756 Float64 *in = (Float64 *)inBuffer;
9757 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9758 for (j=0; j<info.channels; j++) {
9759 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9760 }
9761 in += info.inJump;
9762 out += info.outJump;
9763 }
9764 }
9765 }
9766 else if (info.outFormat == RTAUDIO_SINT24) {
9767 Int24 *out = (Int24 *)outBuffer;
9768 if (info.inFormat == RTAUDIO_SINT8) {
9769 signed char *in = (signed char *)inBuffer;
9770 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9771 for (j=0; j<info.channels; j++) {
9772 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9773 //out[info.outOffset[j]] <<= 16;
9774 }
9775 in += info.inJump;
9776 out += info.outJump;
9777 }
9778 }
9779 else if (info.inFormat == RTAUDIO_SINT16) {
9780 Int16 *in = (Int16 *)inBuffer;
9781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9782 for (j=0; j<info.channels; j++) {
9783 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9784 //out[info.outOffset[j]] <<= 8;
9785 }
9786 in += info.inJump;
9787 out += info.outJump;
9788 }
9789 }
9790 else if (info.inFormat == RTAUDIO_SINT24) {
9791 // Channel compensation and/or (de)interleaving only.
9792 Int24 *in = (Int24 *)inBuffer;
9793 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9794 for (j=0; j<info.channels; j++) {
9795 out[info.outOffset[j]] = in[info.inOffset[j]];
9796 }
9797 in += info.inJump;
9798 out += info.outJump;
9799 }
9800 }
9801 else if (info.inFormat == RTAUDIO_SINT32) {
9802 Int32 *in = (Int32 *)inBuffer;
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9804 for (j=0; j<info.channels; j++) {
9805 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9806 //out[info.outOffset[j]] >>= 8;
9807 }
9808 in += info.inJump;
9809 out += info.outJump;
9810 }
9811 }
9812 else if (info.inFormat == RTAUDIO_FLOAT32) {
9813 Float32 *in = (Float32 *)inBuffer;
9814 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9815 for (j=0; j<info.channels; j++) {
9816 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9817 }
9818 in += info.inJump;
9819 out += info.outJump;
9820 }
9821 }
9822 else if (info.inFormat == RTAUDIO_FLOAT64) {
9823 Float64 *in = (Float64 *)inBuffer;
9824 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9825 for (j=0; j<info.channels; j++) {
9826 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9827 }
9828 in += info.inJump;
9829 out += info.outJump;
9830 }
9831 }
9832 }
9833 else if (info.outFormat == RTAUDIO_SINT16) {
9834 Int16 *out = (Int16 *)outBuffer;
9835 if (info.inFormat == RTAUDIO_SINT8) {
9836 signed char *in = (signed char *)inBuffer;
9837 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9838 for (j=0; j<info.channels; j++) {
9839 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
9840 out[info.outOffset[j]] <<= 8;
9841 }
9842 in += info.inJump;
9843 out += info.outJump;
9844 }
9845 }
9846 else if (info.inFormat == RTAUDIO_SINT16) {
9847 // Channel compensation and/or (de)interleaving only.
9848 Int16 *in = (Int16 *)inBuffer;
9849 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9850 for (j=0; j<info.channels; j++) {
9851 out[info.outOffset[j]] = in[info.inOffset[j]];
9852 }
9853 in += info.inJump;
9854 out += info.outJump;
9855 }
9856 }
9857 else if (info.inFormat == RTAUDIO_SINT24) {
9858 Int24 *in = (Int24 *)inBuffer;
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9860 for (j=0; j<info.channels; j++) {
9861 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
9862 }
9863 in += info.inJump;
9864 out += info.outJump;
9865 }
9866 }
9867 else if (info.inFormat == RTAUDIO_SINT32) {
9868 Int32 *in = (Int32 *)inBuffer;
9869 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9870 for (j=0; j<info.channels; j++) {
9871 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
9872 }
9873 in += info.inJump;
9874 out += info.outJump;
9875 }
9876 }
9877 else if (info.inFormat == RTAUDIO_FLOAT32) {
9878 Float32 *in = (Float32 *)inBuffer;
9879 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9880 for (j=0; j<info.channels; j++) {
9881 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9882 }
9883 in += info.inJump;
9884 out += info.outJump;
9885 }
9886 }
9887 else if (info.inFormat == RTAUDIO_FLOAT64) {
9888 Float64 *in = (Float64 *)inBuffer;
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9890 for (j=0; j<info.channels; j++) {
9891 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9892 }
9893 in += info.inJump;
9894 out += info.outJump;
9895 }
9896 }
9897 }
9898 else if (info.outFormat == RTAUDIO_SINT8) {
9899 signed char *out = (signed char *)outBuffer;
9900 if (info.inFormat == RTAUDIO_SINT8) {
9901 // Channel compensation and/or (de)interleaving only.
9902 signed char *in = (signed char *)inBuffer;
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9904 for (j=0; j<info.channels; j++) {
9905 out[info.outOffset[j]] = in[info.inOffset[j]];
9906 }
9907 in += info.inJump;
9908 out += info.outJump;
9909 }
9910 }
9911 if (info.inFormat == RTAUDIO_SINT16) {
9912 Int16 *in = (Int16 *)inBuffer;
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9914 for (j=0; j<info.channels; j++) {
9915 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
9916 }
9917 in += info.inJump;
9918 out += info.outJump;
9919 }
9920 }
9921 else if (info.inFormat == RTAUDIO_SINT24) {
9922 Int24 *in = (Int24 *)inBuffer;
9923 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9924 for (j=0; j<info.channels; j++) {
9925 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
9926 }
9927 in += info.inJump;
9928 out += info.outJump;
9929 }
9930 }
9931 else if (info.inFormat == RTAUDIO_SINT32) {
9932 Int32 *in = (Int32 *)inBuffer;
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9934 for (j=0; j<info.channels; j++) {
9935 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
9936 }
9937 in += info.inJump;
9938 out += info.outJump;
9939 }
9940 }
9941 else if (info.inFormat == RTAUDIO_FLOAT32) {
9942 Float32 *in = (Float32 *)inBuffer;
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9944 for (j=0; j<info.channels; j++) {
9945 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
9946 }
9947 in += info.inJump;
9948 out += info.outJump;
9949 }
9950 }
9951 else if (info.inFormat == RTAUDIO_FLOAT64) {
9952 Float64 *in = (Float64 *)inBuffer;
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9954 for (j=0; j<info.channels; j++) {
9955 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
9956 }
9957 in += info.inJump;
9958 out += info.outJump;
9959 }
9960 }
9961 }
9962 }
9963
9964 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
9965 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
9966 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
9967
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)9968 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
9969 {
9970 char val;
9971 char *ptr;
9972
9973 ptr = buffer;
9974 if ( format == RTAUDIO_SINT16 ) {
9975 for ( unsigned int i=0; i<samples; i++ ) {
9976 // Swap 1st and 2nd bytes.
9977 val = *(ptr);
9978 *(ptr) = *(ptr+1);
9979 *(ptr+1) = val;
9980
9981 // Increment 2 bytes.
9982 ptr += 2;
9983 }
9984 }
9985 else if ( format == RTAUDIO_SINT32 ||
9986 format == RTAUDIO_FLOAT32 ) {
9987 for ( unsigned int i=0; i<samples; i++ ) {
9988 // Swap 1st and 4th bytes.
9989 val = *(ptr);
9990 *(ptr) = *(ptr+3);
9991 *(ptr+3) = val;
9992
9993 // Swap 2nd and 3rd bytes.
9994 ptr += 1;
9995 val = *(ptr);
9996 *(ptr) = *(ptr+1);
9997 *(ptr+1) = val;
9998
9999 // Increment 3 more bytes.
10000 ptr += 3;
10001 }
10002 }
10003 else if ( format == RTAUDIO_SINT24 ) {
10004 for ( unsigned int i=0; i<samples; i++ ) {
10005 // Swap 1st and 3rd bytes.
10006 val = *(ptr);
10007 *(ptr) = *(ptr+2);
10008 *(ptr+2) = val;
10009
10010 // Increment 2 more bytes.
10011 ptr += 2;
10012 }
10013 }
10014 else if ( format == RTAUDIO_FLOAT64 ) {
10015 for ( unsigned int i=0; i<samples; i++ ) {
10016 // Swap 1st and 8th bytes
10017 val = *(ptr);
10018 *(ptr) = *(ptr+7);
10019 *(ptr+7) = val;
10020
10021 // Swap 2nd and 7th bytes
10022 ptr += 1;
10023 val = *(ptr);
10024 *(ptr) = *(ptr+5);
10025 *(ptr+5) = val;
10026
10027 // Swap 3rd and 6th bytes
10028 ptr += 1;
10029 val = *(ptr);
10030 *(ptr) = *(ptr+3);
10031 *(ptr+3) = val;
10032
10033 // Swap 4th and 5th bytes
10034 ptr += 1;
10035 val = *(ptr);
10036 *(ptr) = *(ptr+1);
10037 *(ptr+1) = val;
10038
10039 // Increment 5 more bytes.
10040 ptr += 5;
10041 }
10042 }
10043 }
10044
10045 // Indentation settings for Vim and Emacs
10046 //
10047 // Local Variables:
10048 // c-basic-offset: 2
10049 // indent-tabs-mode: nil
10050 // End:
10051 //
10052 // vim: et sts=2 sw=2
10053
10054