1 /************************************************************************/
2 /*! \class RtAudio
3     \brief Realtime audio i/o C++ classes.
4 
5     RtAudio provides a common API (Application Programming Interface)
6     for realtime audio input/output across Linux (native ALSA, Jack,
7     and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8     (DirectSound, ASIO and WASAPI) operating systems.
9 
10     RtAudio GitHub site: https://github.com/thestk/rtaudio
11     RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 
13     RtAudio: realtime audio i/o C++ classes
14     Copyright (c) 2001-2019 Gary P. Scavone
15 
16     Permission is hereby granted, free of charge, to any person
17     obtaining a copy of this software and associated documentation files
18     (the "Software"), to deal in the Software without restriction,
19     including without limitation the rights to use, copy, modify, merge,
20     publish, distribute, sublicense, and/or sell copies of the Software,
21     and to permit persons to whom the Software is furnished to do so,
22     subject to the following conditions:
23 
24     The above copyright notice and this permission notice shall be
25     included in all copies or substantial portions of the Software.
26 
27     Any person wishing to distribute modifications to the Software is
28     asked to send the modifications to the original developer so that
29     they can be incorporated into the canonical version.  This is,
30     however, not a binding provision of this license.
31 
32     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34     MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35     IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36     ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37     CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38     WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 */
40 /************************************************************************/
41 
42 // RtAudio: Version 5.1.0
43 
#include "RtAudio.h"
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
51 
// Static variable definitions.
// Canonical list of sample rates probed when a device reports a min/max
// range rather than discrete rates (see the API-specific getDeviceInfo
// implementations).  NOTE(review): MAX_SAMPLE_RATES is hard-coded to match
// the array length below — keep the two in sync when editing.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
58 
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows: implement the portable MUTEX_* macros with critical sections.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Convert a device/driver name (char or wchar_t, depending on the API and
  // UNICODE build settings) to a std::string.
  template<typename T> inline
  std::string convertCharPointerToStdString(const T *text);

  template<> inline
  std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  template<> inline
  std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // UTF-16 -> UTF-8.  'length' counts the terminating null, so the
    // destination string is sized one character shorter.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // No audio API selected: placeholders so the file still compiles.
  // NOTE(review): MUTEX_LOCK/MUTEX_UNLOCK are left undefined in this branch —
  // presumably no locking code path is compiled in this configuration;
  // confirm before adding new APIs.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
95 
96 // *************************************************** //
97 //
98 // RtAudio definitions.
99 //
100 // *************************************************** //
101 
102 std::string RtAudio :: getVersion( void )
103 {
104   return RTAUDIO_VERSION;
105 }
106 
// Define API names and display names.
// Must be in same order as API enum.
extern "C" {
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};
// Number of rows in the table above (compared against RtAudio::NUM_APIS by
// the StaticAssertions check below).
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

// The order here will control the order of RtAudio's API search in
// the constructor.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
  RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
  RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
  RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
  RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
#endif
  RtAudio::UNSPECIFIED,
};
// The trailing UNSPECIFIED sentinel is excluded from the count (hence -1).
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
160 
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// (Pre-C++11 emulation of static_assert: StaticAssert<false> has a private
// constructor, so instantiating it is a compile error.)
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
168 
169 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
170 {
171   apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
172                                    rtaudio_compiled_apis + rtaudio_num_compiled_apis);
173 }
174 
175 std::string RtAudio :: getApiName( RtAudio::Api api )
176 {
177   if (api < 0 || api >= RtAudio::NUM_APIS)
178     return "";
179   return rtaudio_api_names[api][0];
180 }
181 
182 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
183 {
184   if (api < 0 || api >= RtAudio::NUM_APIS)
185     return "Unknown";
186   return rtaudio_api_names[api][1];
187 }
188 
189 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
190 {
191   unsigned int i=0;
192   for (i = 0; i < rtaudio_num_compiled_apis; ++i)
193     if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
194       return rtaudio_compiled_apis[i];
195   return RtAudio::UNSPECIFIED;
196 }
197 
// Destroy any existing API object and attempt to instantiate the one
// requested.  On return, rtapi_ is non-null only if support for 'api' was
// compiled into this build.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
241 
// Construct an RtAudio instance.  If 'api' names a compiled-in API, use it;
// otherwise (or if the request cannot be satisfied) search the compiled
// APIs in order for one with at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value.  Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
274 
275 RtAudio :: ~RtAudio()
276 {
277   if ( rtapi_ )
278     delete rtapi_;
279 }
280 
281 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
282                             RtAudio::StreamParameters *inputParameters,
283                             RtAudioFormat format, unsigned int sampleRate,
284                             unsigned int *bufferFrames,
285                             RtAudioCallback callback, void *userData,
286                             RtAudio::StreamOptions *options,
287                             RtAudioErrorCallback errorCallback )
288 {
289   return rtapi_->openStream( outputParameters, inputParameters, format,
290                              sampleRate, bufferFrames, callback,
291                              userData, options, errorCallback );
292 }
293 
294 // *************************************************** //
295 //
296 // Public RtApi definitions (see end of file for
297 // private or protected utility functions).
298 //
299 // *************************************************** //
300 
301 RtApi :: RtApi()
302 {
303   stream_.state = STREAM_CLOSED;
304   stream_.mode = UNINITIALIZED;
305   stream_.apiHandle = 0;
306   stream_.userBuffer[0] = 0;
307   stream_.userBuffer[1] = 0;
308   MUTEX_INITIALIZE( &stream_.mutex );
309   showWarnings_ = true;
310   firstErrorOccurred_ = false;
311 }
312 
// Release the mutex created in the constructor.  Subclass destructors are
// responsible for closing any open stream before this runs.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
317 
// Validate all arguments, then delegate device configuration to the
// subclass probeDeviceOpen().  On any failure, error() is invoked with an
// explanatory message and the stream is left closed.  On success the stream
// state becomes STREAM_STOPPED and, if 'options' is non-null, its
// numberOfBuffers field is updated with the value actually used.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  // Only one stream may be open per RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // At least one direction (input or output) must be requested.
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Validate device indices against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Probe/configure the output device first, then the input device.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // If the output half was already opened, tear it down before failing.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callbacks as opaque pointers for the API-specific
  // callback thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the buffer count actually chosen by probeDeviceOpen().
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
410 
// Base-class fallback: device 0 is the default input device.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
416 
// Base-class fallback: device 0 is the default output device.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
422 
// Base-class placeholder; each API subclass provides the real teardown.
void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}
428 
// Base-class placeholder; each API subclass performs the actual device
// configuration.  Always reports failure here.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
437 
438 void RtApi :: tickStreamTime( void )
439 {
440   // Subclasses that do not provide their own implementation of
441   // getStreamTime should call this function once per buffer I/O to
442   // provide basic stream time support.
443 
444   stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
445 
446 #if defined( HAVE_GETTIMEOFDAY )
447   gettimeofday( &stream_.lastTickTimestamp, NULL );
448 #endif
449 }
450 
451 long RtApi :: getStreamLatency( void )
452 {
453   verifyStream();
454 
455   long totalLatency = 0;
456   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
457     totalLatency = stream_.latency[0];
458   if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
459     totalLatency += stream_.latency[1];
460 
461   return totalLatency;
462 }
463 
// Return the stream time in seconds.  When gettimeofday is available and
// the stream is running, the software clock maintained by tickStreamTime()
// is refined with the wall-clock time elapsed since the last tick.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // While stopped (or before the first tick), report the raw stream time.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
486 
// Set the stream time to an arbitrary (non-negative) value.  Negative
// values are silently ignored.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Reset the interpolation reference point used by getStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
497 
498 unsigned int RtApi :: getStreamSampleRate( void )
499 {
500  verifyStream();
501 
502  return stream_.sampleRate;
503 }
504 
505 
506 // *************************************************** //
507 //
508 // OS/API-specific methods.
509 //
510 // *************************************************** //
511 
512 #if defined(__MACOSX_CORE__)
513 
514 #include <unistd.h>
515 
516 // The OS X CoreAudio API is designed to use a separate callback
517 // procedure for each of its audio devices.  A single RtAudio duplex
518 // stream using two different devices is supported here, though it
519 // cannot be guaranteed to always behave correctly because we cannot
520 // synchronize these two callbacks.
521 //
522 // A property listener is installed for over/underrun information.
523 // However, no functionality is currently provided to allow property
524 // listeners to trigger user handlers because it is unclear what could
525 // be done if a critical stream parameter (buffer size, sample rate,
526 // device disconnect) notification arrived.  The listeners entail
527 // quite a bit of extra code and most likely, a user program wouldn't
528 // be prepared for the result anyway.  However, we do provide a flag
529 // to the client callback function to inform of an over/underrun.
530 
// A structure to hold various information related to the CoreAudio API
// implementation.  Index [0] refers to the output device, [1] to the input
// device, following the convention used throughout this file.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags reported to the user callback
  char *deviceBuffer;     // intermediate buffer for format/channel conversion
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
549 
// Construct the CoreAudio API object.  On 10.6+ the hardware run loop must
// be explicitly disabled (set to NULL) or device property queries/updates
// misbehave.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
568 
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
576 
// Query the size of the system device-ID array and derive the device count
// from it.  Returns 0 (with a warning) if the query fails.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
591 
592 unsigned int RtApiCore :: getDefaultInputDevice( void )
593 {
594   unsigned int nDevices = getDeviceCount();
595   if ( nDevices <= 1 ) return 0;
596 
597   AudioDeviceID id;
598   UInt32 dataSize = sizeof( AudioDeviceID );
599   AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
600   OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
601   if ( result != noErr ) {
602     errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
603     error( RtAudioError::WARNING );
604     return 0;
605   }
606 
607   dataSize *= nDevices;
608   AudioDeviceID deviceList[ nDevices ];
609   property.mSelector = kAudioHardwarePropertyDevices;
610   result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
611   if ( result != noErr ) {
612     errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
613     error( RtAudioError::WARNING );
614     return 0;
615   }
616 
617   for ( unsigned int i=0; i<nDevices; i++ )
618     if ( id == deviceList[i] ) return i;
619 
620   errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
621   error( RtAudioError::WARNING );
622   return 0;
623 }
624 
625 unsigned int RtApiCore :: getDefaultOutputDevice( void )
626 {
627   unsigned int nDevices = getDeviceCount();
628   if ( nDevices <= 1 ) return 0;
629 
630   AudioDeviceID id;
631   UInt32 dataSize = sizeof( AudioDeviceID );
632   AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
633   OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
634   if ( result != noErr ) {
635     errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
636     error( RtAudioError::WARNING );
637     return 0;
638   }
639 
640   dataSize = sizeof( AudioDeviceID ) * nDevices;
641   AudioDeviceID deviceList[ nDevices ];
642   property.mSelector = kAudioHardwarePropertyDevices;
643   result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
644   if ( result != noErr ) {
645     errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
646     error( RtAudioError::WARNING );
647     return 0;
648   }
649 
650   for ( unsigned int i=0; i<nDevices; i++ )
651     if ( id == deviceList[i] ) return i;
652 
653   errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
654   error( RtAudioError::WARNING );
655   return 0;
656 }
657 
658 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
659 {
660   RtAudio::DeviceInfo info;
661   info.probed = false;
662 
663   // Get device ID
664   unsigned int nDevices = getDeviceCount();
665   if ( nDevices == 0 ) {
666     errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
667     error( RtAudioError::INVALID_USE );
668     return info;
669   }
670 
671   if ( device >= nDevices ) {
672     errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
673     error( RtAudioError::INVALID_USE );
674     return info;
675   }
676 
677   AudioDeviceID deviceList[ nDevices ];
678   UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
679   AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
680                                           kAudioObjectPropertyScopeGlobal,
681                                           kAudioObjectPropertyElementMaster };
682   OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
683                                                 0, NULL, &dataSize, (void *) &deviceList );
684   if ( result != noErr ) {
685     errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
686     error( RtAudioError::WARNING );
687     return info;
688   }
689 
690   AudioDeviceID id = deviceList[ device ];
691 
692   // Get the device name.
693   info.name.erase();
694   CFStringRef cfname;
695   dataSize = sizeof( CFStringRef );
696   property.mSelector = kAudioObjectPropertyManufacturer;
697   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
698   if ( result != noErr ) {
699     errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
700     errorText_ = errorStream_.str();
701     error( RtAudioError::WARNING );
702     return info;
703   }
704 
705   //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
706   int length = CFStringGetLength(cfname);
707   char *mname = (char *)malloc(length * 3 + 1);
708 #if defined( UNICODE ) || defined( _UNICODE )
709   CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
710 #else
711   CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
712 #endif
713   info.name.append( (const char *)mname, strlen(mname) );
714   info.name.append( ": " );
715   CFRelease( cfname );
716   free(mname);
717 
718   property.mSelector = kAudioObjectPropertyName;
719   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
720   if ( result != noErr ) {
721     errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
722     errorText_ = errorStream_.str();
723     error( RtAudioError::WARNING );
724     return info;
725   }
726 
727   //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
728   length = CFStringGetLength(cfname);
729   char *name = (char *)malloc(length * 3 + 1);
730 #if defined( UNICODE ) || defined( _UNICODE )
731   CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
732 #else
733   CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
734 #endif
735   info.name.append( (const char *)name, strlen(name) );
736   CFRelease( cfname );
737   free(name);
738 
739   // Get the output stream "configuration".
740   AudioBufferList	*bufferList = nil;
741   property.mSelector = kAudioDevicePropertyStreamConfiguration;
742   property.mScope = kAudioDevicePropertyScopeOutput;
743   //  property.mElement = kAudioObjectPropertyElementWildcard;
744   dataSize = 0;
745   result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
746   if ( result != noErr || dataSize == 0 ) {
747     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
748     errorText_ = errorStream_.str();
749     error( RtAudioError::WARNING );
750     return info;
751   }
752 
753   // Allocate the AudioBufferList.
754   bufferList = (AudioBufferList *) malloc( dataSize );
755   if ( bufferList == NULL ) {
756     errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
757     error( RtAudioError::WARNING );
758     return info;
759   }
760 
761   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
762   if ( result != noErr || dataSize == 0 ) {
763     free( bufferList );
764     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
765     errorText_ = errorStream_.str();
766     error( RtAudioError::WARNING );
767     return info;
768   }
769 
770   // Get output channel information.
771   unsigned int i, nStreams = bufferList->mNumberBuffers;
772   for ( i=0; i<nStreams; i++ )
773     info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
774   free( bufferList );
775 
776   // Get the input stream "configuration".
777   property.mScope = kAudioDevicePropertyScopeInput;
778   result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
779   if ( result != noErr || dataSize == 0 ) {
780     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
781     errorText_ = errorStream_.str();
782     error( RtAudioError::WARNING );
783     return info;
784   }
785 
786   // Allocate the AudioBufferList.
787   bufferList = (AudioBufferList *) malloc( dataSize );
788   if ( bufferList == NULL ) {
789     errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
790     error( RtAudioError::WARNING );
791     return info;
792   }
793 
794   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
795   if (result != noErr || dataSize == 0) {
796     free( bufferList );
797     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
798     errorText_ = errorStream_.str();
799     error( RtAudioError::WARNING );
800     return info;
801   }
802 
803   // Get input channel information.
804   nStreams = bufferList->mNumberBuffers;
805   for ( i=0; i<nStreams; i++ )
806     info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
807   free( bufferList );
808 
809   // If device opens for both playback and capture, we determine the channels.
810   if ( info.outputChannels > 0 && info.inputChannels > 0 )
811     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
812 
813   // Probe the device sample rates.
814   bool isInput = false;
815   if ( info.outputChannels == 0 ) isInput = true;
816 
817   // Determine the supported sample rates.
818   property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
819   if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
820   result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
821   if ( result != kAudioHardwareNoError || dataSize == 0 ) {
822     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
823     errorText_ = errorStream_.str();
824     error( RtAudioError::WARNING );
825     return info;
826   }
827 
828   UInt32 nRanges = dataSize / sizeof( AudioValueRange );
829   AudioValueRange rangeList[ nRanges ];
830   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
831   if ( result != kAudioHardwareNoError ) {
832     errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
833     errorText_ = errorStream_.str();
834     error( RtAudioError::WARNING );
835     return info;
836   }
837 
838   // The sample rate reporting mechanism is a bit of a mystery.  It
839   // seems that it can either return individual rates or a range of
840   // rates.  I assume that if the min / max range values are the same,
841   // then that represents a single supported rate and if the min / max
842   // range values are different, the device supports an arbitrary
843   // range of values (though there might be multiple ranges, so we'll
844   // use the most conservative range).
845   Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
846   bool haveValueRange = false;
847   info.sampleRates.clear();
848   for ( UInt32 i=0; i<nRanges; i++ ) {
849     if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
850       unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
851       info.sampleRates.push_back( tmpSr );
852 
853       if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
854         info.preferredSampleRate = tmpSr;
855 
856     } else {
857       haveValueRange = true;
858       if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
859       if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
860     }
861   }
862 
863   if ( haveValueRange ) {
864     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
865       if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
866         info.sampleRates.push_back( SAMPLE_RATES[k] );
867 
868         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
869           info.preferredSampleRate = SAMPLE_RATES[k];
870       }
871     }
872   }
873 
874   // Sort and remove any redundant values
875   std::sort( info.sampleRates.begin(), info.sampleRates.end() );
876   info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
877 
878   if ( info.sampleRates.size() == 0 ) {
879     errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
880     errorText_ = errorStream_.str();
881     error( RtAudioError::WARNING );
882     return info;
883   }
884 
885   // CoreAudio always uses 32-bit floating point data for PCM streams.
886   // Thus, any other "physical" formats supported by the device are of
887   // no interest to the client.
888   info.nativeFormats = RTAUDIO_FLOAT32;
889 
890   if ( info.outputChannels > 0 )
891     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
892   if ( info.inputChannels > 0 )
893     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
894 
895   info.probed = true;
896   return info;
897 }
898 
899 static OSStatus callbackHandler( AudioDeviceID inDevice,
900                                  const AudioTimeStamp* /*inNow*/,
901                                  const AudioBufferList* inInputData,
902                                  const AudioTimeStamp* /*inInputTime*/,
903                                  AudioBufferList* outOutputData,
904                                  const AudioTimeStamp* /*inOutputTime*/,
905                                  void* infoPointer )
906 {
907   CallbackInfo *info = (CallbackInfo *) infoPointer;
908 
909   RtApiCore *object = (RtApiCore *) info->object;
910   if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
911     return kAudioHardwareUnspecifiedError;
912   else
913     return kAudioHardwareNoError;
914 }
915 
916 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
917                               UInt32 nAddresses,
918                               const AudioObjectPropertyAddress properties[],
919                               void* handlePointer )
920 {
921   CoreHandle *handle = (CoreHandle *) handlePointer;
922   for ( UInt32 i=0; i<nAddresses; i++ ) {
923     if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
924       if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
925         handle->xrun[1] = true;
926       else
927         handle->xrun[0] = true;
928     }
929   }
930 
931   return kAudioHardwareNoError;
932 }
933 
934 static OSStatus rateListener( AudioObjectID inDevice,
935                               UInt32 /*nAddresses*/,
936                               const AudioObjectPropertyAddress /*properties*/[],
937                               void* ratePointer )
938 {
939   Float64 *rate = (Float64 *) ratePointer;
940   UInt32 dataSize = sizeof( Float64 );
941   AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
942                                           kAudioObjectPropertyScopeGlobal,
943                                           kAudioObjectPropertyElementMaster };
944   AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
945   return kAudioHardwareNoError;
946 }
947 
948 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
949                                    unsigned int firstChannel, unsigned int sampleRate,
950                                    RtAudioFormat format, unsigned int *bufferSize,
951                                    RtAudio::StreamOptions *options )
952 {
953   // Get device ID
954   unsigned int nDevices = getDeviceCount();
955   if ( nDevices == 0 ) {
956     // This should not happen because a check is made before this function is called.
957     errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
958     return FAILURE;
959   }
960 
961   if ( device >= nDevices ) {
962     // This should not happen because a check is made before this function is called.
963     errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
964     return FAILURE;
965   }
966 
967   AudioDeviceID deviceList[ nDevices ];
968   UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
969   AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
970                                           kAudioObjectPropertyScopeGlobal,
971                                           kAudioObjectPropertyElementMaster };
972   OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
973                                                 0, NULL, &dataSize, (void *) &deviceList );
974   if ( result != noErr ) {
975     errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
976     return FAILURE;
977   }
978 
979   AudioDeviceID id = deviceList[ device ];
980 
981   // Setup for stream mode.
982   bool isInput = false;
983   if ( mode == INPUT ) {
984     isInput = true;
985     property.mScope = kAudioDevicePropertyScopeInput;
986   }
987   else
988     property.mScope = kAudioDevicePropertyScopeOutput;
989 
990   // Get the stream "configuration".
991   AudioBufferList	*bufferList = nil;
992   dataSize = 0;
993   property.mSelector = kAudioDevicePropertyStreamConfiguration;
994   result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
995   if ( result != noErr || dataSize == 0 ) {
996     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
997     errorText_ = errorStream_.str();
998     return FAILURE;
999   }
1000 
1001   // Allocate the AudioBufferList.
1002   bufferList = (AudioBufferList *) malloc( dataSize );
1003   if ( bufferList == NULL ) {
1004     errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1005     return FAILURE;
1006   }
1007 
1008   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1009   if (result != noErr || dataSize == 0) {
1010     free( bufferList );
1011     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1012     errorText_ = errorStream_.str();
1013     return FAILURE;
1014   }
1015 
1016   // Search for one or more streams that contain the desired number of
1017   // channels. CoreAudio devices can have an arbitrary number of
1018   // streams and each stream can have an arbitrary number of channels.
1019   // For each stream, a single buffer of interleaved samples is
1020   // provided.  RtAudio prefers the use of one stream of interleaved
1021   // data or multiple consecutive single-channel streams.  However, we
1022   // now support multiple consecutive multi-channel streams of
1023   // interleaved data as well.
1024   UInt32 iStream, offsetCounter = firstChannel;
1025   UInt32 nStreams = bufferList->mNumberBuffers;
1026   bool monoMode = false;
1027   bool foundStream = false;
1028 
1029   // First check that the device supports the requested number of
1030   // channels.
1031   UInt32 deviceChannels = 0;
1032   for ( iStream=0; iStream<nStreams; iStream++ )
1033     deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1034 
1035   if ( deviceChannels < ( channels + firstChannel ) ) {
1036     free( bufferList );
1037     errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1038     errorText_ = errorStream_.str();
1039     return FAILURE;
1040   }
1041 
1042   // Look for a single stream meeting our needs.
1043   UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1044   for ( iStream=0; iStream<nStreams; iStream++ ) {
1045     streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1046     if ( streamChannels >= channels + offsetCounter ) {
1047       firstStream = iStream;
1048       channelOffset = offsetCounter;
1049       foundStream = true;
1050       break;
1051     }
1052     if ( streamChannels > offsetCounter ) break;
1053     offsetCounter -= streamChannels;
1054   }
1055 
1056   // If we didn't find a single stream above, then we should be able
1057   // to meet the channel specification with multiple streams.
1058   if ( foundStream == false ) {
1059     monoMode = true;
1060     offsetCounter = firstChannel;
1061     for ( iStream=0; iStream<nStreams; iStream++ ) {
1062       streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1063       if ( streamChannels > offsetCounter ) break;
1064       offsetCounter -= streamChannels;
1065     }
1066 
1067     firstStream = iStream;
1068     channelOffset = offsetCounter;
1069     Int32 channelCounter = channels + offsetCounter - streamChannels;
1070 
1071     if ( streamChannels > 1 ) monoMode = false;
1072     while ( channelCounter > 0 ) {
1073       streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1074       if ( streamChannels > 1 ) monoMode = false;
1075       channelCounter -= streamChannels;
1076       streamCount++;
1077     }
1078   }
1079 
1080   free( bufferList );
1081 
1082   // Determine the buffer size.
1083   AudioValueRange	bufferRange;
1084   dataSize = sizeof( AudioValueRange );
1085   property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1086   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1087 
1088   if ( result != noErr ) {
1089     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1090     errorText_ = errorStream_.str();
1091     return FAILURE;
1092   }
1093 
1094   if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1095   else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1096   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1097 
1098   // Set the buffer size.  For multiple streams, I'm assuming we only
1099   // need to make this setting for the master channel.
1100   UInt32 theSize = (UInt32) *bufferSize;
1101   dataSize = sizeof( UInt32 );
1102   property.mSelector = kAudioDevicePropertyBufferFrameSize;
1103   result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1104 
1105   if ( result != noErr ) {
1106     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1107     errorText_ = errorStream_.str();
1108     return FAILURE;
1109   }
1110 
1111   // If attempting to setup a duplex stream, the bufferSize parameter
1112   // MUST be the same in both directions!
1113   *bufferSize = theSize;
1114   if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1115     errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1116     errorText_ = errorStream_.str();
1117     return FAILURE;
1118   }
1119 
1120   stream_.bufferSize = *bufferSize;
1121   stream_.nBuffers = 1;
1122 
1123   // Try to set "hog" mode ... it's not clear to me this is working.
1124   if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1125     pid_t hog_pid;
1126     dataSize = sizeof( hog_pid );
1127     property.mSelector = kAudioDevicePropertyHogMode;
1128     result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1129     if ( result != noErr ) {
1130       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1131       errorText_ = errorStream_.str();
1132       return FAILURE;
1133     }
1134 
1135     if ( hog_pid != getpid() ) {
1136       hog_pid = getpid();
1137       result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1138       if ( result != noErr ) {
1139         errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1140         errorText_ = errorStream_.str();
1141         return FAILURE;
1142       }
1143     }
1144   }
1145 
1146   // Check and if necessary, change the sample rate for the device.
1147   Float64 nominalRate;
1148   dataSize = sizeof( Float64 );
1149   property.mSelector = kAudioDevicePropertyNominalSampleRate;
1150   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1151   if ( result != noErr ) {
1152     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1153     errorText_ = errorStream_.str();
1154     return FAILURE;
1155   }
1156 
1157   // Only change the sample rate if off by more than 1 Hz.
1158   if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1159 
1160     // Set a property listener for the sample rate change
1161     Float64 reportedRate = 0.0;
1162     AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1163     result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1164     if ( result != noErr ) {
1165       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1166       errorText_ = errorStream_.str();
1167       return FAILURE;
1168     }
1169 
1170     nominalRate = (Float64) sampleRate;
1171     result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1172     if ( result != noErr ) {
1173       AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1174       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1175       errorText_ = errorStream_.str();
1176       return FAILURE;
1177     }
1178 
1179     // Now wait until the reported nominal rate is what we just set.
1180     UInt32 microCounter = 0;
1181     while ( reportedRate != nominalRate ) {
1182       microCounter += 5000;
1183       if ( microCounter > 5000000 ) break;
1184       usleep( 5000 );
1185     }
1186 
1187     // Remove the property listener.
1188     AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1189 
1190     if ( microCounter > 5000000 ) {
1191       errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1192       errorText_ = errorStream_.str();
1193       return FAILURE;
1194     }
1195   }
1196 
1197   // Now set the stream format for all streams.  Also, check the
1198   // physical format of the device and change that if necessary.
1199   AudioStreamBasicDescription	description;
1200   dataSize = sizeof( AudioStreamBasicDescription );
1201   property.mSelector = kAudioStreamPropertyVirtualFormat;
1202   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1203   if ( result != noErr ) {
1204     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1205     errorText_ = errorStream_.str();
1206     return FAILURE;
1207   }
1208 
1209   // Set the sample rate and data format id.  However, only make the
1210   // change if the sample rate is not within 1.0 of the desired
1211   // rate and the format is not linear pcm.
1212   bool updateFormat = false;
1213   if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1214     description.mSampleRate = (Float64) sampleRate;
1215     updateFormat = true;
1216   }
1217 
1218   if ( description.mFormatID != kAudioFormatLinearPCM ) {
1219     description.mFormatID = kAudioFormatLinearPCM;
1220     updateFormat = true;
1221   }
1222 
1223   if ( updateFormat ) {
1224     result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1225     if ( result != noErr ) {
1226       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1227       errorText_ = errorStream_.str();
1228       return FAILURE;
1229     }
1230   }
1231 
1232   // Now check the physical format.
1233   property.mSelector = kAudioStreamPropertyPhysicalFormat;
1234   result = AudioObjectGetPropertyData( id, &property, 0, NULL,  &dataSize, &description );
1235   if ( result != noErr ) {
1236     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1237     errorText_ = errorStream_.str();
1238     return FAILURE;
1239   }
1240 
1241   //std::cout << "Current physical stream format:" << std::endl;
1242   //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1243   //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1244   //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1245   //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1246 
1247   if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1248     description.mFormatID = kAudioFormatLinearPCM;
1249     //description.mSampleRate = (Float64) sampleRate;
1250     AudioStreamBasicDescription	testDescription = description;
1251     UInt32 formatFlags;
1252 
1253     // We'll try higher bit rates first and then work our way down.
1254     std::vector< std::pair<UInt32, UInt32>  > physicalFormats;
1255     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1256     physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1257     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258     physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1259     physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1260     formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1261     physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1262     formatFlags |= kAudioFormatFlagIsAlignedHigh;
1263     physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1264     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1265     physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1266     physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1267 
1268     bool setPhysicalFormat = false;
1269     for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1270       testDescription = description;
1271       testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1272       testDescription.mFormatFlags = physicalFormats[i].second;
1273       if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1274         testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1275       else
1276         testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1277       testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1278       result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1279       if ( result == noErr ) {
1280         setPhysicalFormat = true;
1281         //std::cout << "Updated physical stream format:" << std::endl;
1282         //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1283         //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1284         //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1285         //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1286         break;
1287       }
1288     }
1289 
1290     if ( !setPhysicalFormat ) {
1291       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1292       errorText_ = errorStream_.str();
1293       return FAILURE;
1294     }
1295   } // done setting virtual/physical formats.
1296 
1297   // Get the stream / device latency.
1298   UInt32 latency;
1299   dataSize = sizeof( UInt32 );
1300   property.mSelector = kAudioDevicePropertyLatency;
1301   if ( AudioObjectHasProperty( id, &property ) == true ) {
1302     result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1303     if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1304     else {
1305       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1306       errorText_ = errorStream_.str();
1307       error( RtAudioError::WARNING );
1308     }
1309   }
1310 
1311   // Byte-swapping: According to AudioHardware.h, the stream data will
1312   // always be presented in native-endian format, so we should never
1313   // need to byte swap.
1314   stream_.doByteSwap[mode] = false;
1315 
1316   // From the CoreAudio documentation, PCM data must be supplied as
1317   // 32-bit floats.
1318   stream_.userFormat = format;
1319   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1320 
1321   if ( streamCount == 1 )
1322     stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1323   else // multiple streams
1324     stream_.nDeviceChannels[mode] = channels;
1325   stream_.nUserChannels[mode] = channels;
1326   stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1327   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1328   else stream_.userInterleaved = true;
1329   stream_.deviceInterleaved[mode] = true;
1330   if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1331 
1332   // Set flags for buffer conversion.
1333   stream_.doConvertBuffer[mode] = false;
1334   if ( stream_.userFormat != stream_.deviceFormat[mode] )
1335     stream_.doConvertBuffer[mode] = true;
1336   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1337     stream_.doConvertBuffer[mode] = true;
1338   if ( streamCount == 1 ) {
1339     if ( stream_.nUserChannels[mode] > 1 &&
1340          stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1341       stream_.doConvertBuffer[mode] = true;
1342   }
1343   else if ( monoMode && stream_.userInterleaved )
1344     stream_.doConvertBuffer[mode] = true;
1345 
1346   // Allocate our CoreHandle structure for the stream.
1347   CoreHandle *handle = 0;
1348   if ( stream_.apiHandle == 0 ) {
1349     try {
1350       handle = new CoreHandle;
1351     }
1352     catch ( std::bad_alloc& ) {
1353       errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1354       goto error;
1355     }
1356 
1357     if ( pthread_cond_init( &handle->condition, NULL ) ) {
1358       errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1359       goto error;
1360     }
1361     stream_.apiHandle = (void *) handle;
1362   }
1363   else
1364     handle = (CoreHandle *) stream_.apiHandle;
1365   handle->iStream[mode] = firstStream;
1366   handle->nStreams[mode] = streamCount;
1367   handle->id[mode] = id;
1368 
1369   // Allocate necessary internal buffers.
1370   unsigned long bufferBytes;
1371   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1372   //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1373   stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1374   memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1375   if ( stream_.userBuffer[mode] == NULL ) {
1376     errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1377     goto error;
1378   }
1379 
1380   // If possible, we will make use of the CoreAudio stream buffers as
1381   // "device buffers".  However, we can't do this if using multiple
1382   // streams.
1383   if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1384 
1385     bool makeBuffer = true;
1386     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1387     if ( mode == INPUT ) {
1388       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1389         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1390         if ( bufferBytes <= bytesOut ) makeBuffer = false;
1391       }
1392     }
1393 
1394     if ( makeBuffer ) {
1395       bufferBytes *= *bufferSize;
1396       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1397       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1398       if ( stream_.deviceBuffer == NULL ) {
1399         errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1400         goto error;
1401       }
1402     }
1403   }
1404 
1405   stream_.sampleRate = sampleRate;
1406   stream_.device[mode] = device;
1407   stream_.state = STREAM_STOPPED;
1408   stream_.callbackInfo.object = (void *) this;
1409 
1410   // Setup the buffer conversion information structure.
1411   if ( stream_.doConvertBuffer[mode] ) {
1412     if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1413     else setConvertInfo( mode, channelOffset );
1414   }
1415 
1416   if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1417     // Only one callback procedure per device.
1418     stream_.mode = DUPLEX;
1419   else {
1420 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1421     result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1422 #else
1423     // deprecated in favor of AudioDeviceCreateIOProcID()
1424     result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1425 #endif
1426     if ( result != noErr ) {
1427       errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1428       errorText_ = errorStream_.str();
1429       goto error;
1430     }
1431     if ( stream_.mode == OUTPUT && mode == INPUT )
1432       stream_.mode = DUPLEX;
1433     else
1434       stream_.mode = mode;
1435   }
1436 
1437   // Setup the device property listener for over/underload.
1438   property.mSelector = kAudioDeviceProcessorOverload;
1439   property.mScope = kAudioObjectPropertyScopeGlobal;
1440   result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1441 
1442   return SUCCESS;
1443 
1444  error:
1445   if ( handle ) {
1446     pthread_cond_destroy( &handle->condition );
1447     delete handle;
1448     stream_.apiHandle = 0;
1449   }
1450 
1451   for ( int i=0; i<2; i++ ) {
1452     if ( stream_.userBuffer[i] ) {
1453       free( stream_.userBuffer[i] );
1454       stream_.userBuffer[i] = 0;
1455     }
1456   }
1457 
1458   if ( stream_.deviceBuffer ) {
1459     free( stream_.deviceBuffer );
1460     stream_.deviceBuffer = 0;
1461   }
1462 
1463   stream_.state = STREAM_CLOSED;
1464   return FAILURE;
1465 }
1466 
1467 void RtApiCore :: closeStream( void )
1468 {
1469   if ( stream_.state == STREAM_CLOSED ) {
1470     errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1471     error( RtAudioError::WARNING );
1472     return;
1473   }
1474 
1475   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1476   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1477     if (handle) {
1478       AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1479         kAudioObjectPropertyScopeGlobal,
1480         kAudioObjectPropertyElementMaster };
1481 
1482       property.mSelector = kAudioDeviceProcessorOverload;
1483       property.mScope = kAudioObjectPropertyScopeGlobal;
1484       if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1485         errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1486         error( RtAudioError::WARNING );
1487       }
1488 
1489 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1490       if ( stream_.state == STREAM_RUNNING )
1491         AudioDeviceStop( handle->id[0], handle->procId[0] );
1492       AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1493 #else // deprecated behaviour
1494       if ( stream_.state == STREAM_RUNNING )
1495         AudioDeviceStop( handle->id[0], callbackHandler );
1496       AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1497 #endif
1498     }
1499   }
1500 
1501   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502     if (handle) {
1503       AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1504         kAudioObjectPropertyScopeGlobal,
1505         kAudioObjectPropertyElementMaster };
1506 
1507       property.mSelector = kAudioDeviceProcessorOverload;
1508       property.mScope = kAudioObjectPropertyScopeGlobal;
1509       if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1510         errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1511         error( RtAudioError::WARNING );
1512       }
1513 
1514 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1515       if ( stream_.state == STREAM_RUNNING )
1516         AudioDeviceStop( handle->id[1], handle->procId[1] );
1517       AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1518 #else // deprecated behaviour
1519       if ( stream_.state == STREAM_RUNNING )
1520         AudioDeviceStop( handle->id[1], callbackHandler );
1521       AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1522 #endif
1523     }
1524   }
1525 
1526   for ( int i=0; i<2; i++ ) {
1527     if ( stream_.userBuffer[i] ) {
1528       free( stream_.userBuffer[i] );
1529       stream_.userBuffer[i] = 0;
1530     }
1531   }
1532 
1533   if ( stream_.deviceBuffer ) {
1534     free( stream_.deviceBuffer );
1535     stream_.deviceBuffer = 0;
1536   }
1537 
1538   // Destroy pthread condition variable.
1539   pthread_cond_destroy( &handle->condition );
1540   delete handle;
1541   stream_.apiHandle = 0;
1542 
1543   stream_.mode = UNINITIALIZED;
1544   stream_.state = STREAM_CLOSED;
1545 }
1546 
1547 void RtApiCore :: startStream( void )
1548 {
1549   verifyStream();
1550   if ( stream_.state == STREAM_RUNNING ) {
1551     errorText_ = "RtApiCore::startStream(): the stream is already running!";
1552     error( RtAudioError::WARNING );
1553     return;
1554   }
1555 
1556 #if defined( HAVE_GETTIMEOFDAY )
1557   gettimeofday( &stream_.lastTickTimestamp, NULL );
1558 #endif
1559 
1560   OSStatus result = noErr;
1561   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1562   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1563 
1564 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1565     result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1566 #else // deprecated behaviour
1567     result = AudioDeviceStart( handle->id[0], callbackHandler );
1568 #endif
1569     if ( result != noErr ) {
1570       errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1571       errorText_ = errorStream_.str();
1572       goto unlock;
1573     }
1574   }
1575 
1576   if ( stream_.mode == INPUT ||
1577        ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1578 
1579 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1580     result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1581 #else // deprecated behaviour
1582     result = AudioDeviceStart( handle->id[1], callbackHandler );
1583 #endif
1584     if ( result != noErr ) {
1585       errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1586       errorText_ = errorStream_.str();
1587       goto unlock;
1588     }
1589   }
1590 
1591   handle->drainCounter = 0;
1592   handle->internalDrain = false;
1593   stream_.state = STREAM_RUNNING;
1594 
1595  unlock:
1596   if ( result == noErr ) return;
1597   error( RtAudioError::SYSTEM_ERROR );
1598 }
1599 
void RtApiCore :: stopStream( void )
{
  // Stop callback processing on the stream's device(s).  For output
  // streams, pending audio is first drained (zero-filled buffers are
  // written by callbackEvent()) so the tail of the signal is not cut off.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // If no drain is already in progress (drainCounter == 0), request
    // one (drainCounter = 2 makes callbackEvent() write zeros) and wait
    // for callbackEvent() to signal that the drain has completed.
    // NOTE(review): stream_.mutex is not locked here before the wait;
    // POSIX requires the associated mutex to be held when calling
    // pthread_cond_wait — presumably benign given the signal ordering,
    // but worth confirming.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStop( handle->id[0], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input device separately only when it differs from the
  // output device (a shared duplex device was stopped above).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[1], handle->procId[1] );
#else  // deprecated behaviour
    result = AudioDeviceStop( handle->id[1], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1650 
1651 void RtApiCore :: abortStream( void )
1652 {
1653   verifyStream();
1654   if ( stream_.state == STREAM_STOPPED ) {
1655     errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1656     error( RtAudioError::WARNING );
1657     return;
1658   }
1659 
1660   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1661   handle->drainCounter = 2;
1662 
1663   stopStream();
1664 }
1665 
1666 // This function will be called by a spawned thread when the user
1667 // callback function signals that the stream should be stopped or
1668 // aborted.  It is better to handle it this way because the
1669 // callbackEvent() function probably should return before the AudioDeviceStop()
1670 // function is called.
1671 static void *coreStopStream( void *ptr )
1672 {
1673   CallbackInfo *info = (CallbackInfo *) ptr;
1674   RtApiCore *object = (RtApiCore *) info->object;
1675 
1676   object->stopStream();
1677   pthread_exit( NULL );
1678 }
1679 
// Per-buffer processing for a CoreAudio stream.  Called from the device
// IOProc for deviceId: runs the user callback, moves data between the
// user buffers and the CoreAudio stream buffer list(s) (converting,
// interleaving or de-interleaving as required), and manages the
// output-drain handshake used when stopping a stream.  Returns SUCCESS
// unless the stream is found in an unexpected (closed) state.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // (drainCounter is incremented once per callback below; > 3 means
  // the zero-filled buffers have had time to reach the hardware.)
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was initiated by the user callback (return value 1):
      // stop the stream from a separate thread so we can return now.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any over/underflow flags set by the xrun listener.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return of 2 = abort immediately; 1 = drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  // Render side: write silence while draining, otherwise copy/convert
  // the user's output data into the CoreAudio stream buffer(s).
  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the stride between successive channels of one
        // frame in the source buffer (1 if interleaved, else a whole
        // channel-block apart).
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        // Distribute inChannels source channels across the device's
        // output streams, tracking how many remain after each stream.
        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Frame-by-frame copy of this stream's channels.
          // (NB: this inner index intentionally shadows the outer i.)
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // Capture side: copy/convert the CoreAudio input buffers into the
  // user's input buffer (mirror of the render logic above).
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset is the stride between successive channels of one
        // frame in the destination buffer.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // Frame-by-frame copy of this stream's channels.
          // (NB: this inner index intentionally shadows the outer i.)
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1942 
1943 const char* RtApiCore :: getErrorCode( OSStatus code )
1944 {
1945   switch( code ) {
1946 
1947   case kAudioHardwareNotRunningError:
1948     return "kAudioHardwareNotRunningError";
1949 
1950   case kAudioHardwareUnspecifiedError:
1951     return "kAudioHardwareUnspecifiedError";
1952 
1953   case kAudioHardwareUnknownPropertyError:
1954     return "kAudioHardwareUnknownPropertyError";
1955 
1956   case kAudioHardwareBadPropertySizeError:
1957     return "kAudioHardwareBadPropertySizeError";
1958 
1959   case kAudioHardwareIllegalOperationError:
1960     return "kAudioHardwareIllegalOperationError";
1961 
1962   case kAudioHardwareBadObjectError:
1963     return "kAudioHardwareBadObjectError";
1964 
1965   case kAudioHardwareBadDeviceError:
1966     return "kAudioHardwareBadDeviceError";
1967 
1968   case kAudioHardwareBadStreamError:
1969     return "kAudioHardwareBadStreamError";
1970 
1971   case kAudioHardwareUnsupportedOperationError:
1972     return "kAudioHardwareUnsupportedOperationError";
1973 
1974   case kAudioDeviceUnsupportedFormatError:
1975     return "kAudioDeviceUnsupportedFormatError";
1976 
1977   case kAudioDevicePermissionsError:
1978     return "kAudioDevicePermissionsError";
1979 
1980   default:
1981     return "CoreAudio unknown error";
1982   }
1983 }
1984 
1985   //******************** End of __MACOSX_CORE__ *********************//
1986 #endif
1987 
1988 #if defined(__UNIX_JACK__)
1989 
1990 // JACK is a low-latency audio server, originally written for the
1991 // GNU/Linux operating system and now also ported to OS-X. It can
1992 // connect a number of different applications to an audio device, as
1993 // well as allowing them to share audio between themselves.
1994 //
1995 // When using JACK with RtAudio, "devices" refer to JACK clients that
1996 // have ports connected to the server.  The JACK server is typically
1997 // started in a terminal as follows:
1998 //
1999 // .jackd -d alsa -d hw:0
2000 //
2001 // or through an interface program such as qjackctl.  Many of the
2002 // parameters normally set for a stream are fixed by the JACK server
2003 // and can be specified when the JACK server is started.  In
2004 // particular,
2005 //
2006 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2007 //
2008 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2009 // frames, and number of buffers = 4.  Once the server is running, it
2010 // is not possible to override these values.  If the values are not
2011 // specified in the command-line, the JACK server uses default values.
2012 //
2013 // The JACK server does not have to be running when an instance of
2014 // RtApiJack is created, though the function getDeviceCount() will
2015 // report 0 devices found until JACK has been started.  When no
2016 // devices are available (i.e., the JACK server is not running), a
2017 // stream cannot be opened.
2018 
2019 #include <jack/jack.h>
2020 #include <unistd.h>
2021 #include <cstdio>
2022 
2023 // A structure to hold various information related to the Jack API
2024 // implementation.
2025 struct JackHandle {
2026   jack_client_t *client;
2027   jack_port_t **ports[2];
2028   std::string deviceName[2];
2029   bool xrun[2];
2030   pthread_cond_t condition;
2031   int drainCounter;       // Tracks callback counts when draining
2032   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2033 
2034   JackHandle()
2035     :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2036 };
2037 
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() to
// suppress Jack's internal error reporting in non-debug builds.
// (Stray semicolon after the function body removed; it triggered
// -Wextra-semi style warnings.)
static void jackSilentError( const char * ) {}
#endif
2041 
2042 RtApiJack :: RtApiJack()
2043     :shouldAutoconnect_(true) {
2044   // Nothing to do here.
2045 #if !defined(__RTAUDIO_DEBUG__)
2046   // Turn off Jack's internal error reporting.
2047   jack_set_error_function( &jackSilentError );
2048 #endif
2049 }
2050 
2051 RtApiJack :: ~RtApiJack()
2052 {
2053   if ( stream_.state != STREAM_CLOSED ) closeStream();
2054 }
2055 
2056 unsigned int RtApiJack :: getDeviceCount( void )
2057 {
2058   // See if we can become a jack client.
2059   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2060   jack_status_t *status = NULL;
2061   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2062   if ( client == 0 ) return 0;
2063 
2064   const char **ports;
2065   std::string port, previousPort;
2066   unsigned int nChannels = 0, nDevices = 0;
2067   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2068   if ( ports ) {
2069     // Parse the port names up to the first colon (:).
2070     size_t iColon = 0;
2071     do {
2072       port = (char *) ports[ nChannels ];
2073       iColon = port.find(":");
2074       if ( iColon != std::string::npos ) {
2075         port = port.substr( 0, iColon + 1 );
2076         if ( port != previousPort ) {
2077           nDevices++;
2078           previousPort = port;
2079         }
2080       }
2081     } while ( ports[++nChannels] );
2082     free( ports );
2083   }
2084 
2085   jack_client_close( client );
2086   return nDevices;
2087 }
2088 
// Probe Jack "device" number `device` (a Jack client with audio
// ports): fills in name, channel counts, the server's sample rate and
// native format.  On failure, returns with info.probed == false after
// reporting a WARNING (or INVALID_USE for a bad device index).
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Open a throwaway client only to query the server; closed on every
  // exit path below.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  // Enumerate all ports ("client:port") to find the client name
  // corresponding to the requested device index.
  // NOTE(review): jack_get_ports() results are released with free()
  // here; current Jack documentation recommends jack_free() — verify.
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2181 
2182 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2183 {
2184   CallbackInfo *info = (CallbackInfo *) infoPointer;
2185 
2186   RtApiJack *object = (RtApiJack *) info->object;
2187   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2188 
2189   return 0;
2190 }
2191 
2192 // This function will be called by a spawned thread when the Jack
2193 // server signals that it is shutting down.  It is necessary to handle
2194 // it this way because the jackShutdown() function must return before
2195 // the jack_deactivate() function (in closeStream()) will return.
2196 static void *jackCloseStream( void *ptr )
2197 {
2198   CallbackInfo *info = (CallbackInfo *) ptr;
2199   RtApiJack *object = (RtApiJack *) info->object;
2200 
2201   object->closeStream();
2202 
2203   pthread_exit( NULL );
2204 }
2205 static void jackShutdown( void *infoPointer )
2206 {
2207   CallbackInfo *info = (CallbackInfo *) infoPointer;
2208   RtApiJack *object = (RtApiJack *) info->object;
2209 
2210   // Check current stream state.  If stopped, then we'll assume this
2211   // was called as a result of a call to RtApiJack::stopStream (the
2212   // deactivation of a client handle causes this function to be called).
2213   // If not, we'll assume the Jack server is shutting down or some
2214   // other problem occurred and we should close the stream.
2215   if ( object->isStreamRunning() == false ) return;
2216 
2217   ThreadHandle threadId;
2218   pthread_create( &threadId, NULL, jackCloseStream, info );
2219   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2220 }
2221 
2222 static int jackXrun( void *infoPointer )
2223 {
2224   JackHandle *handle = *((JackHandle **) infoPointer);
2225 
2226   if ( handle->ports[0] ) handle->xrun[0] = true;
2227   if ( handle->ports[1] ) handle->xrun[1] = true;
2228 
2229   return 0;
2230 }
2231 
2232 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2233                                    unsigned int firstChannel, unsigned int sampleRate,
2234                                    RtAudioFormat format, unsigned int *bufferSize,
2235                                    RtAudio::StreamOptions *options )
2236 {
2237   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2238 
2239   // Look for jack server and try to become a client (only do once per stream).
2240   jack_client_t *client = 0;
2241   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2242     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2243     jack_status_t *status = NULL;
2244     if ( options && !options->streamName.empty() )
2245       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2246     else
2247       client = jack_client_open( "RtApiJack", jackoptions, status );
2248     if ( client == 0 ) {
2249       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2250       error( RtAudioError::WARNING );
2251       return FAILURE;
2252     }
2253   }
2254   else {
2255     // The handle must have been created on an earlier pass.
2256     client = handle->client;
2257   }
2258 
2259   const char **ports;
2260   std::string port, previousPort, deviceName;
2261   unsigned int nPorts = 0, nDevices = 0;
2262   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2263   if ( ports ) {
2264     // Parse the port names up to the first colon (:).
2265     size_t iColon = 0;
2266     do {
2267       port = (char *) ports[ nPorts ];
2268       iColon = port.find(":");
2269       if ( iColon != std::string::npos ) {
2270         port = port.substr( 0, iColon );
2271         if ( port != previousPort ) {
2272           if ( nDevices == device ) deviceName = port;
2273           nDevices++;
2274           previousPort = port;
2275         }
2276       }
2277     } while ( ports[++nPorts] );
2278     free( ports );
2279   }
2280 
2281   if ( device >= nDevices ) {
2282     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2283     return FAILURE;
2284   }
2285 
2286   unsigned long flag = JackPortIsInput;
2287   if ( mode == INPUT ) flag = JackPortIsOutput;
2288 
2289   if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2290     // Count the available ports containing the client name as device
2291     // channels.  Jack "input ports" equal RtAudio output channels.
2292     unsigned int nChannels = 0;
2293     ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2294     if ( ports ) {
2295       while ( ports[ nChannels ] ) nChannels++;
2296       free( ports );
2297     }
2298     // Compare the jack ports for specified client to the requested number of channels.
2299     if ( nChannels < (channels + firstChannel) ) {
2300       errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2301       errorText_ = errorStream_.str();
2302       return FAILURE;
2303     }
2304   }
2305 
2306   // Check the jack server sample rate.
2307   unsigned int jackRate = jack_get_sample_rate( client );
2308   if ( sampleRate != jackRate ) {
2309     jack_client_close( client );
2310     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2311     errorText_ = errorStream_.str();
2312     return FAILURE;
2313   }
2314   stream_.sampleRate = jackRate;
2315 
2316   // Get the latency of the JACK port.
2317   ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2318   if ( ports[ firstChannel ] ) {
2319     // Added by Ge Wang
2320     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2321     // the range (usually the min and max are equal)
2322     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2323     // get the latency range
2324     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2325     // be optimistic, use the min!
2326     stream_.latency[mode] = latrange.min;
2327     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2328   }
2329   free( ports );
2330 
2331   // The jack server always uses 32-bit floating-point data.
2332   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2333   stream_.userFormat = format;
2334 
2335   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2336   else stream_.userInterleaved = true;
2337 
2338   // Jack always uses non-interleaved buffers.
2339   stream_.deviceInterleaved[mode] = false;
2340 
2341   // Jack always provides host byte-ordered data.
2342   stream_.doByteSwap[mode] = false;
2343 
2344   // Get the buffer size.  The buffer size and number of buffers
2345   // (periods) is set when the jack server is started.
2346   stream_.bufferSize = (int) jack_get_buffer_size( client );
2347   *bufferSize = stream_.bufferSize;
2348 
2349   stream_.nDeviceChannels[mode] = channels;
2350   stream_.nUserChannels[mode] = channels;
2351 
2352   // Set flags for buffer conversion.
2353   stream_.doConvertBuffer[mode] = false;
2354   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2355     stream_.doConvertBuffer[mode] = true;
2356   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2357        stream_.nUserChannels[mode] > 1 )
2358     stream_.doConvertBuffer[mode] = true;
2359 
2360   // Allocate our JackHandle structure for the stream.
2361   if ( handle == 0 ) {
2362     try {
2363       handle = new JackHandle;
2364     }
2365     catch ( std::bad_alloc& ) {
2366       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2367       goto error;
2368     }
2369 
2370     if ( pthread_cond_init(&handle->condition, NULL) ) {
2371       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2372       goto error;
2373     }
2374     stream_.apiHandle = (void *) handle;
2375     handle->client = client;
2376   }
2377   handle->deviceName[mode] = deviceName;
2378 
2379   // Allocate necessary internal buffers.
2380   unsigned long bufferBytes;
2381   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2382   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2383   if ( stream_.userBuffer[mode] == NULL ) {
2384     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2385     goto error;
2386   }
2387 
2388   if ( stream_.doConvertBuffer[mode] ) {
2389 
2390     bool makeBuffer = true;
2391     if ( mode == OUTPUT )
2392       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2393     else { // mode == INPUT
2394       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2395       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2396         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2397         if ( bufferBytes < bytesOut ) makeBuffer = false;
2398       }
2399     }
2400 
2401     if ( makeBuffer ) {
2402       bufferBytes *= *bufferSize;
2403       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2404       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2405       if ( stream_.deviceBuffer == NULL ) {
2406         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2407         goto error;
2408       }
2409     }
2410   }
2411 
2412   // Allocate memory for the Jack ports (channels) identifiers.
2413   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2414   if ( handle->ports[mode] == NULL )  {
2415     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2416     goto error;
2417   }
2418 
2419   stream_.device[mode] = device;
2420   stream_.channelOffset[mode] = firstChannel;
2421   stream_.state = STREAM_STOPPED;
2422   stream_.callbackInfo.object = (void *) this;
2423 
2424   if ( stream_.mode == OUTPUT && mode == INPUT )
2425     // We had already set up the stream for output.
2426     stream_.mode = DUPLEX;
2427   else {
2428     stream_.mode = mode;
2429     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2430     jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2431     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2432   }
2433 
2434   // Register our ports.
2435   char label[64];
2436   if ( mode == OUTPUT ) {
2437     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2438       snprintf( label, 64, "outport %d", i );
2439       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2440                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2441     }
2442   }
2443   else {
2444     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2445       snprintf( label, 64, "inport %d", i );
2446       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2447                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2448     }
2449   }
2450 
2451   // Setup the buffer conversion information structure.  We don't use
2452   // buffers to do channel offsets, so we override that parameter
2453   // here.
2454   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2455 
2456   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2457 
2458   return SUCCESS;
2459 
2460  error:
2461   if ( handle ) {
2462     pthread_cond_destroy( &handle->condition );
2463     jack_client_close( handle->client );
2464 
2465     if ( handle->ports[0] ) free( handle->ports[0] );
2466     if ( handle->ports[1] ) free( handle->ports[1] );
2467 
2468     delete handle;
2469     stream_.apiHandle = 0;
2470   }
2471 
2472   for ( int i=0; i<2; i++ ) {
2473     if ( stream_.userBuffer[i] ) {
2474       free( stream_.userBuffer[i] );
2475       stream_.userBuffer[i] = 0;
2476     }
2477   }
2478 
2479   if ( stream_.deviceBuffer ) {
2480     free( stream_.deviceBuffer );
2481     stream_.deviceBuffer = 0;
2482   }
2483 
2484   return FAILURE;
2485 }
2486 
2487 void RtApiJack :: closeStream( void )
2488 {
2489   if ( stream_.state == STREAM_CLOSED ) {
2490     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2491     error( RtAudioError::WARNING );
2492     return;
2493   }
2494 
2495   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2496   if ( handle ) {
2497 
2498     if ( stream_.state == STREAM_RUNNING )
2499       jack_deactivate( handle->client );
2500 
2501     jack_client_close( handle->client );
2502   }
2503 
2504   if ( handle ) {
2505     if ( handle->ports[0] ) free( handle->ports[0] );
2506     if ( handle->ports[1] ) free( handle->ports[1] );
2507     pthread_cond_destroy( &handle->condition );
2508     delete handle;
2509     stream_.apiHandle = 0;
2510   }
2511 
2512   for ( int i=0; i<2; i++ ) {
2513     if ( stream_.userBuffer[i] ) {
2514       free( stream_.userBuffer[i] );
2515       stream_.userBuffer[i] = 0;
2516     }
2517   }
2518 
2519   if ( stream_.deviceBuffer ) {
2520     free( stream_.deviceBuffer );
2521     stream_.deviceBuffer = 0;
2522   }
2523 
2524   stream_.mode = UNINITIALIZED;
2525   stream_.state = STREAM_CLOSED;
2526 }
2527 
void RtApiJack :: startStream( void )
{
  // Activate the JACK client and, unless autoconnection was disabled at
  // open time, connect our registered ports to the device's ports.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );  // nonzero result indicates failure
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    result = 1;  // assume failure until the connections succeed
    // Jack "input ports" of the device receive our output channels.
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );  // release the port list before bailing out
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;  // assume failure until the connections succeed
    // Jack "output ports" of the device feed our input channels.
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );  // release the port list before bailing out
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset drain state and mark the stream running for the process callback.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;  // success: every jack_* call returned 0
  error( RtAudioError::SYSTEM_ERROR );
}
2605 
2606 void RtApiJack :: stopStream( void )
2607 {
2608   verifyStream();
2609   if ( stream_.state == STREAM_STOPPED ) {
2610     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2611     error( RtAudioError::WARNING );
2612     return;
2613   }
2614 
2615   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2616   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2617 
2618     if ( handle->drainCounter == 0 ) {
2619       handle->drainCounter = 2;
2620       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2621     }
2622   }
2623 
2624   jack_deactivate( handle->client );
2625   stream_.state = STREAM_STOPPED;
2626 }
2627 
2628 void RtApiJack :: abortStream( void )
2629 {
2630   verifyStream();
2631   if ( stream_.state == STREAM_STOPPED ) {
2632     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2633     error( RtAudioError::WARNING );
2634     return;
2635   }
2636 
2637   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2638   handle->drainCounter = 2;
2639 
2640   stopStream();
2641 }
2642 
2643 // This function will be called by a spawned thread when the user
2644 // callback function signals that the stream should be stopped or
2645 // aborted.  It is necessary to handle it this way because the
2646 // callbackEvent() function must return before the jack_deactivate()
2647 // function will return.
2648 static void *jackStopStream( void *ptr )
2649 {
2650   CallbackInfo *info = (CallbackInfo *) ptr;
2651   RtApiJack *object = (RtApiJack *) info->object;
2652 
2653   object->stopStream();
2654   pthread_exit( NULL );
2655 }
2656 
2657 bool RtApiJack :: callbackEvent( unsigned long nframes )
2658 {
2659   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2660   if ( stream_.state == STREAM_CLOSED ) {
2661     errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2662     error( RtAudioError::WARNING );
2663     return FAILURE;
2664   }
2665   if ( stream_.bufferSize != nframes ) {
2666     errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2667     error( RtAudioError::WARNING );
2668     return FAILURE;
2669   }
2670 
2671   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2672   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2673 
2674   // Check if we were draining the stream and signal is finished.
2675   if ( handle->drainCounter > 3 ) {
2676     ThreadHandle threadId;
2677 
2678     stream_.state = STREAM_STOPPING;
2679     if ( handle->internalDrain == true )
2680       pthread_create( &threadId, NULL, jackStopStream, info );
2681     else
2682       pthread_cond_signal( &handle->condition );
2683     return SUCCESS;
2684   }
2685 
2686   // Invoke user callback first, to get fresh output data.
2687   if ( handle->drainCounter == 0 ) {
2688     RtAudioCallback callback = (RtAudioCallback) info->callback;
2689     double streamTime = getStreamTime();
2690     RtAudioStreamStatus status = 0;
2691     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2692       status |= RTAUDIO_OUTPUT_UNDERFLOW;
2693       handle->xrun[0] = false;
2694     }
2695     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2696       status |= RTAUDIO_INPUT_OVERFLOW;
2697       handle->xrun[1] = false;
2698     }
2699     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2700                                   stream_.bufferSize, streamTime, status, info->userData );
2701     if ( cbReturnValue == 2 ) {
2702       stream_.state = STREAM_STOPPING;
2703       handle->drainCounter = 2;
2704       ThreadHandle id;
2705       pthread_create( &id, NULL, jackStopStream, info );
2706       return SUCCESS;
2707     }
2708     else if ( cbReturnValue == 1 ) {
2709       handle->drainCounter = 1;
2710       handle->internalDrain = true;
2711     }
2712   }
2713 
2714   jack_default_audio_sample_t *jackbuffer;
2715   unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2716   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2717 
2718     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2719 
2720       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2721         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2722         memset( jackbuffer, 0, bufferBytes );
2723       }
2724 
2725     }
2726     else if ( stream_.doConvertBuffer[0] ) {
2727 
2728       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2729 
2730       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2731         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2732         memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2733       }
2734     }
2735     else { // no buffer conversion
2736       for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2737         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2738         memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2739       }
2740     }
2741   }
2742 
2743   // Don't bother draining input
2744   if ( handle->drainCounter ) {
2745     handle->drainCounter++;
2746     goto unlock;
2747   }
2748 
2749   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2750 
2751     if ( stream_.doConvertBuffer[1] ) {
2752       for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2753         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754         memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2755       }
2756       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2757     }
2758     else { // no buffer conversion
2759       for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2760         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2761         memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2762       }
2763     }
2764   }
2765 
2766  unlock:
2767   RtApi::tickStreamTime();
2768   return SUCCESS;
2769 }
2770   //******************** End of __UNIX_JACK__ *********************//
2771 #endif
2772 
2773 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2774 
2775 // The ASIO API is designed around a callback scheme, so this
2776 // implementation is similar to that used for OS-X CoreAudio and Linux
2777 // Jack.  The primary constraint with ASIO is that it only allows
2778 // access to a single driver at a time.  Thus, it is not possible to
2779 // have more than one simultaneous RtAudio stream.
2780 //
2781 // This implementation also requires a number of external ASIO files
2782 // and a few global variables.  The ASIO callback scheme does not
2783 // allow for the passing of user data, so we must create a global
2784 // pointer to our callbackInfo structure.
2785 //
2786 // On unix systems, we make use of a pthread condition variable.
2787 // Since there is no equivalent in Windows, I hacked something based
2788 // on information found in
2789 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2790 
2791 #include "asiosys.h"
2792 #include "asio.h"
2793 #include "iasiothiscallresolver.h"
2794 #include "asiodrivers.h"
2795 #include <cmath>
2796 
// File-scope state shared with the ASIO callbacks, which cannot carry
// user data (see note above).
static AsioDrivers drivers;           // ASIO driver enumeration/loading; one driver current at a time
static ASIOCallbacks asioCallbacks;   // callback table registered with the driver
static ASIODriverInfo driverInfo;     // info structure passed to ASIOInit()
static CallbackInfo *asioCallbackInfo;  // global hook used by bufferSwitch() to reach the RtApiAsio object
static bool asioXRun;                 // over/underrun flag raised by the driver (presumably via asioMessages — see declaration below)
2802 
2803 struct AsioHandle {
2804   int drainCounter;       // Tracks callback counts when draining
2805   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2806   ASIOBufferInfo *bufferInfos;
2807   HANDLE condition;
2808 
2809   AsioHandle()
2810     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2811 };
2812 
2813 // Function declarations (definitions at end of section)
2814 static const char* getAsioErrorString( ASIOError result );
2815 static void sampleRateChanged( ASIOSampleRate sRate );
2816 static long asioMessages( long selector, long value, void* message, double* opt );
2817 
2818 RtApiAsio :: RtApiAsio()
2819 {
2820   // ASIO cannot run on a multi-threaded appartment. You can call
2821   // CoInitialize beforehand, but it must be for appartment threading
2822   // (in which case, CoInitilialize will return S_FALSE here).
2823   coInitialized_ = false;
2824   HRESULT hr = CoInitialize( NULL );
2825   if ( FAILED(hr) ) {
2826     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2827     error( RtAudioError::WARNING );
2828   }
2829   coInitialized_ = true;
2830 
2831   drivers.removeCurrentDriver();
2832   driverInfo.asioVersion = 2;
2833 
2834   // See note in DirectSound implementation about GetDesktopWindow().
2835   driverInfo.sysRef = GetForegroundWindow();
2836 }
2837 
RtApiAsio :: ~RtApiAsio()
{
  // Close any open stream first, then balance the constructor's
  // CoInitialize() call when it was recorded via coInitialized_.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
2843 
2844 unsigned int RtApiAsio :: getDeviceCount( void )
2845 {
2846   return (unsigned int) drivers.asioGetNumDev();
2847 }
2848 
2849 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2850 {
2851   RtAudio::DeviceInfo info;
2852   info.probed = false;
2853 
2854   // Get device ID
2855   unsigned int nDevices = getDeviceCount();
2856   if ( nDevices == 0 ) {
2857     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2858     error( RtAudioError::INVALID_USE );
2859     return info;
2860   }
2861 
2862   if ( device >= nDevices ) {
2863     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2864     error( RtAudioError::INVALID_USE );
2865     return info;
2866   }
2867 
2868   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2869   if ( stream_.state != STREAM_CLOSED ) {
2870     if ( device >= devices_.size() ) {
2871       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2872       error( RtAudioError::WARNING );
2873       return info;
2874     }
2875     return devices_[ device ];
2876   }
2877 
2878   char driverName[32];
2879   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2880   if ( result != ASE_OK ) {
2881     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2882     errorText_ = errorStream_.str();
2883     error( RtAudioError::WARNING );
2884     return info;
2885   }
2886 
2887   info.name = driverName;
2888 
2889   if ( !drivers.loadDriver( driverName ) ) {
2890     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2891     errorText_ = errorStream_.str();
2892     error( RtAudioError::WARNING );
2893     return info;
2894   }
2895 
2896   result = ASIOInit( &driverInfo );
2897   if ( result != ASE_OK ) {
2898     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2899     errorText_ = errorStream_.str();
2900     error( RtAudioError::WARNING );
2901     return info;
2902   }
2903 
2904   // Determine the device channel information.
2905   long inputChannels, outputChannels;
2906   result = ASIOGetChannels( &inputChannels, &outputChannels );
2907   if ( result != ASE_OK ) {
2908     drivers.removeCurrentDriver();
2909     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2910     errorText_ = errorStream_.str();
2911     error( RtAudioError::WARNING );
2912     return info;
2913   }
2914 
2915   info.outputChannels = outputChannels;
2916   info.inputChannels = inputChannels;
2917   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2918     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2919 
2920   // Determine the supported sample rates.
2921   info.sampleRates.clear();
2922   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2923     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2924     if ( result == ASE_OK ) {
2925       info.sampleRates.push_back( SAMPLE_RATES[i] );
2926 
2927       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2928         info.preferredSampleRate = SAMPLE_RATES[i];
2929     }
2930   }
2931 
2932   // Determine supported data types ... just check first channel and assume rest are the same.
2933   ASIOChannelInfo channelInfo;
2934   channelInfo.channel = 0;
2935   channelInfo.isInput = true;
2936   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2937   result = ASIOGetChannelInfo( &channelInfo );
2938   if ( result != ASE_OK ) {
2939     drivers.removeCurrentDriver();
2940     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2941     errorText_ = errorStream_.str();
2942     error( RtAudioError::WARNING );
2943     return info;
2944   }
2945 
2946   info.nativeFormats = 0;
2947   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2948     info.nativeFormats |= RTAUDIO_SINT16;
2949   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2950     info.nativeFormats |= RTAUDIO_SINT32;
2951   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2952     info.nativeFormats |= RTAUDIO_FLOAT32;
2953   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2954     info.nativeFormats |= RTAUDIO_FLOAT64;
2955   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2956     info.nativeFormats |= RTAUDIO_SINT24;
2957 
2958   if ( info.outputChannels > 0 )
2959     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2960   if ( info.inputChannels > 0 )
2961     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2962 
2963   info.probed = true;
2964   drivers.removeCurrentDriver();
2965   return info;
2966 }
2967 
2968 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2969 {
2970   RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2971   object->callbackEvent( index );
2972 }
2973 
2974 void RtApiAsio :: saveDeviceInfo( void )
2975 {
2976   devices_.clear();
2977 
2978   unsigned int nDevices = getDeviceCount();
2979   devices_.resize( nDevices );
2980   for ( unsigned int i=0; i<nDevices; i++ )
2981     devices_[i] = getDeviceInfo( i );
2982 }
2983 
2984 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2985                                    unsigned int firstChannel, unsigned int sampleRate,
2986                                    RtAudioFormat format, unsigned int *bufferSize,
2987                                    RtAudio::StreamOptions *options )
2988 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2989 
2990   bool isDuplexInput =  mode == INPUT && stream_.mode == OUTPUT;
2991 
2992   // For ASIO, a duplex stream MUST use the same driver.
2993   if ( isDuplexInput && stream_.device[0] != device ) {
2994     errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2995     return FAILURE;
2996   }
2997 
2998   char driverName[32];
2999   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3000   if ( result != ASE_OK ) {
3001     errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3002     errorText_ = errorStream_.str();
3003     return FAILURE;
3004   }
3005 
3006   // Only load the driver once for duplex stream.
3007   if ( !isDuplexInput ) {
3008     // The getDeviceInfo() function will not work when a stream is open
3009     // because ASIO does not allow multiple devices to run at the same
3010     // time.  Thus, we'll probe the system before opening a stream and
3011     // save the results for use by getDeviceInfo().
3012     this->saveDeviceInfo();
3013 
3014     if ( !drivers.loadDriver( driverName ) ) {
3015       errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3016       errorText_ = errorStream_.str();
3017       return FAILURE;
3018     }
3019 
3020     result = ASIOInit( &driverInfo );
3021     if ( result != ASE_OK ) {
3022       errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3023       errorText_ = errorStream_.str();
3024       return FAILURE;
3025     }
3026   }
3027 
3028   // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3029   bool buffersAllocated = false;
3030   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3031   unsigned int nChannels;
3032 
3033 
3034   // Check the device channel count.
3035   long inputChannels, outputChannels;
3036   result = ASIOGetChannels( &inputChannels, &outputChannels );
3037   if ( result != ASE_OK ) {
3038     errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3039     errorText_ = errorStream_.str();
3040     goto error;
3041   }
3042 
3043   if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3044        ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3045     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3046     errorText_ = errorStream_.str();
3047     goto error;
3048   }
3049   stream_.nDeviceChannels[mode] = channels;
3050   stream_.nUserChannels[mode] = channels;
3051   stream_.channelOffset[mode] = firstChannel;
3052 
3053   // Verify the sample rate is supported.
3054   result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3055   if ( result != ASE_OK ) {
3056     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3057     errorText_ = errorStream_.str();
3058     goto error;
3059   }
3060 
3061   // Get the current sample rate
3062   ASIOSampleRate currentRate;
3063   result = ASIOGetSampleRate( &currentRate );
3064   if ( result != ASE_OK ) {
3065     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3066     errorText_ = errorStream_.str();
3067     goto error;
3068   }
3069 
3070   // Set the sample rate only if necessary
3071   if ( currentRate != sampleRate ) {
3072     result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3073     if ( result != ASE_OK ) {
3074       errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3075       errorText_ = errorStream_.str();
3076       goto error;
3077     }
3078   }
3079 
3080   // Determine the driver data type.
3081   ASIOChannelInfo channelInfo;
3082   channelInfo.channel = 0;
3083   if ( mode == OUTPUT ) channelInfo.isInput = false;
3084   else channelInfo.isInput = true;
3085   result = ASIOGetChannelInfo( &channelInfo );
3086   if ( result != ASE_OK ) {
3087     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3088     errorText_ = errorStream_.str();
3089     goto error;
3090   }
3091 
3092   // Assuming WINDOWS host is always little-endian.
3093   stream_.doByteSwap[mode] = false;
3094   stream_.userFormat = format;
3095   stream_.deviceFormat[mode] = 0;
3096   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3097     stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3098     if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3099   }
3100   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3101     stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3102     if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3103   }
3104   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3105     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3106     if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3107   }
3108   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3109     stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3110     if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3111   }
3112   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3113     stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3114     if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3115   }
3116 
3117   if ( stream_.deviceFormat[mode] == 0 ) {
3118     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3119     errorText_ = errorStream_.str();
3120     goto error;
3121   }
3122 
3123   // Set the buffer size.  For a duplex stream, this will end up
3124   // setting the buffer size based on the input constraints, which
3125   // should be ok.
3126   long minSize, maxSize, preferSize, granularity;
3127   result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3128   if ( result != ASE_OK ) {
3129     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3130     errorText_ = errorStream_.str();
3131     goto error;
3132   }
3133 
3134   if ( isDuplexInput ) {
3135     // When this is the duplex input (output was opened before), then we have to use the same
3136     // buffersize as the output, because it might use the preferred buffer size, which most
    // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
    // so instead of throwing an error, make them equal. The caller uses the reference
3139     // to the "bufferSize" param as usual to set up processing buffers.
3140 
3141     *bufferSize = stream_.bufferSize;
3142 
3143   } else {
3144     if ( *bufferSize == 0 ) *bufferSize = preferSize;
3145     else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3146     else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3147     else if ( granularity == -1 ) {
3148       // Make sure bufferSize is a power of two.
3149       int log2_of_min_size = 0;
3150       int log2_of_max_size = 0;
3151 
3152       for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3153         if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3154         if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3155       }
3156 
3157       long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3158       int min_delta_num = log2_of_min_size;
3159 
3160       for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3161         long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3162         if (current_delta < min_delta) {
3163           min_delta = current_delta;
3164           min_delta_num = i;
3165         }
3166       }
3167 
3168       *bufferSize = ( (unsigned int)1 << min_delta_num );
3169       if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3170       else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3171     }
3172     else if ( granularity != 0 ) {
3173       // Set to an even multiple of granularity, rounding up.
3174       *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3175     }
3176   }
3177 
3178   /*
3179   // we don't use it anymore, see above!
3180   // Just left it here for the case...
3181   if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3182     errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3183     goto error;
3184   }
3185   */
3186 
3187   stream_.bufferSize = *bufferSize;
3188   stream_.nBuffers = 2;
3189 
3190   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3191   else stream_.userInterleaved = true;
3192 
3193   // ASIO always uses non-interleaved buffers.
3194   stream_.deviceInterleaved[mode] = false;
3195 
3196   // Allocate, if necessary, our AsioHandle structure for the stream.
3197   if ( handle == 0 ) {
3198     try {
3199       handle = new AsioHandle;
3200     }
3201     catch ( std::bad_alloc& ) {
3202       errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3203       goto error;
3204     }
3205     handle->bufferInfos = 0;
3206 
3207     // Create a manual-reset event.
3208     handle->condition = CreateEvent( NULL,   // no security
3209                                      TRUE,   // manual-reset
3210                                      FALSE,  // non-signaled initially
3211                                      NULL ); // unnamed
3212     stream_.apiHandle = (void *) handle;
3213   }
3214 
3215   // Create the ASIO internal buffers.  Since RtAudio sets up input
3216   // and output separately, we'll have to dispose of previously
3217   // created output buffers for a duplex stream.
3218   if ( mode == INPUT && stream_.mode == OUTPUT ) {
3219     ASIODisposeBuffers();
3220     if ( handle->bufferInfos ) free( handle->bufferInfos );
3221   }
3222 
3223   // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3224   unsigned int i;
3225   nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3226   handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3227   if ( handle->bufferInfos == NULL ) {
3228     errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3229     errorText_ = errorStream_.str();
3230     goto error;
3231   }
3232 
3233   ASIOBufferInfo *infos;
3234   infos = handle->bufferInfos;
3235   for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3236     infos->isInput = ASIOFalse;
3237     infos->channelNum = i + stream_.channelOffset[0];
3238     infos->buffers[0] = infos->buffers[1] = 0;
3239   }
3240   for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3241     infos->isInput = ASIOTrue;
3242     infos->channelNum = i + stream_.channelOffset[1];
3243     infos->buffers[0] = infos->buffers[1] = 0;
3244   }
3245 
3246   // prepare for callbacks
3247   stream_.sampleRate = sampleRate;
3248   stream_.device[mode] = device;
3249   stream_.mode = isDuplexInput ? DUPLEX : mode;
3250 
3251   // store this class instance before registering callbacks, that are going to use it
3252   asioCallbackInfo = &stream_.callbackInfo;
3253   stream_.callbackInfo.object = (void *) this;
3254 
3255   // Set up the ASIO callback structure and create the ASIO data buffers.
3256   asioCallbacks.bufferSwitch = &bufferSwitch;
3257   asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3258   asioCallbacks.asioMessage = &asioMessages;
3259   asioCallbacks.bufferSwitchTimeInfo = NULL;
3260   result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3261   if ( result != ASE_OK ) {
3262     // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3263     // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3264     // In that case, let's be naïve and try that instead.
3265     *bufferSize = preferSize;
3266     stream_.bufferSize = *bufferSize;
3267     result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3268   }
3269 
3270   if ( result != ASE_OK ) {
3271     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3272     errorText_ = errorStream_.str();
3273     goto error;
3274   }
3275   buffersAllocated = true;
3276   stream_.state = STREAM_STOPPED;
3277 
3278   // Set flags for buffer conversion.
3279   stream_.doConvertBuffer[mode] = false;
3280   if ( stream_.userFormat != stream_.deviceFormat[mode] )
3281     stream_.doConvertBuffer[mode] = true;
3282   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3283        stream_.nUserChannels[mode] > 1 )
3284     stream_.doConvertBuffer[mode] = true;
3285 
3286   // Allocate necessary internal buffers
3287   unsigned long bufferBytes;
3288   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3289   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3290   if ( stream_.userBuffer[mode] == NULL ) {
3291     errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3292     goto error;
3293   }
3294 
3295   if ( stream_.doConvertBuffer[mode] ) {
3296 
3297     bool makeBuffer = true;
3298     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3299     if ( isDuplexInput && stream_.deviceBuffer ) {
3300       unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3301       if ( bufferBytes <= bytesOut ) makeBuffer = false;
3302     }
3303 
3304     if ( makeBuffer ) {
3305       bufferBytes *= *bufferSize;
3306       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3307       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3308       if ( stream_.deviceBuffer == NULL ) {
3309         errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3310         goto error;
3311       }
3312     }
3313   }
3314 
3315   // Determine device latencies
3316   long inputLatency, outputLatency;
3317   result = ASIOGetLatencies( &inputLatency, &outputLatency );
3318   if ( result != ASE_OK ) {
3319     errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3320     errorText_ = errorStream_.str();
3321     error( RtAudioError::WARNING); // warn but don't fail
3322   }
3323   else {
3324     stream_.latency[0] = outputLatency;
3325     stream_.latency[1] = inputLatency;
3326   }
3327 
3328   // Setup the buffer conversion information structure.  We don't use
3329   // buffers to do channel offsets, so we override that parameter
3330   // here.
3331   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3332 
3333   return SUCCESS;
3334 
3335  error:
3336   if ( !isDuplexInput ) {
3337     // the cleanup for error in the duplex input, is done by RtApi::openStream
3338     // So we clean up for single channel only
3339 
3340     if ( buffersAllocated )
3341       ASIODisposeBuffers();
3342 
3343     drivers.removeCurrentDriver();
3344 
3345     if ( handle ) {
3346       CloseHandle( handle->condition );
3347       if ( handle->bufferInfos )
3348         free( handle->bufferInfos );
3349 
3350       delete handle;
3351       stream_.apiHandle = 0;
3352     }
3353 
3354 
3355     if ( stream_.userBuffer[mode] ) {
3356       free( stream_.userBuffer[mode] );
3357       stream_.userBuffer[mode] = 0;
3358     }
3359 
3360     if ( stream_.deviceBuffer ) {
3361       free( stream_.deviceBuffer );
3362       stream_.deviceBuffer = 0;
3363     }
3364   }
3365 
3366   return FAILURE;
3367 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3368 
3369 void RtApiAsio :: closeStream()
3370 {
3371   if ( stream_.state == STREAM_CLOSED ) {
3372     errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3373     error( RtAudioError::WARNING );
3374     return;
3375   }
3376 
3377   if ( stream_.state == STREAM_RUNNING ) {
3378     stream_.state = STREAM_STOPPED;
3379     ASIOStop();
3380   }
3381   ASIODisposeBuffers();
3382   drivers.removeCurrentDriver();
3383 
3384   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3385   if ( handle ) {
3386     CloseHandle( handle->condition );
3387     if ( handle->bufferInfos )
3388       free( handle->bufferInfos );
3389     delete handle;
3390     stream_.apiHandle = 0;
3391   }
3392 
3393   for ( int i=0; i<2; i++ ) {
3394     if ( stream_.userBuffer[i] ) {
3395       free( stream_.userBuffer[i] );
3396       stream_.userBuffer[i] = 0;
3397     }
3398   }
3399 
3400   if ( stream_.deviceBuffer ) {
3401     free( stream_.deviceBuffer );
3402     stream_.deviceBuffer = 0;
3403   }
3404 
3405   stream_.mode = UNINITIALIZED;
3406   stream_.state = STREAM_CLOSED;
3407 }
3408 
// File-scope flag cleared whenever a stream is (re)started; presumably it
// guards against spawning more than one stop-stream thread.
// NOTE(review): its set/read sites are elsewhere in this file -- confirm
// before relying on the exact semantics.
bool stopThreadCalled = false;
3410 
3411 void RtApiAsio :: startStream()
3412 {
3413   verifyStream();
3414   if ( stream_.state == STREAM_RUNNING ) {
3415     errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3416     error( RtAudioError::WARNING );
3417     return;
3418   }
3419 
3420   #if defined( HAVE_GETTIMEOFDAY )
3421   gettimeofday( &stream_.lastTickTimestamp, NULL );
3422   #endif
3423 
3424   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3425   ASIOError result = ASIOStart();
3426   if ( result != ASE_OK ) {
3427     errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3428     errorText_ = errorStream_.str();
3429     goto unlock;
3430   }
3431 
3432   handle->drainCounter = 0;
3433   handle->internalDrain = false;
3434   ResetEvent( handle->condition );
3435   stream_.state = STREAM_RUNNING;
3436   asioXRun = false;
3437 
3438  unlock:
3439   stopThreadCalled = false;
3440 
3441   if ( result == ASE_OK ) return;
3442   error( RtAudioError::SYSTEM_ERROR );
3443 }
3444 
3445 void RtApiAsio :: stopStream()
3446 {
3447   verifyStream();
3448   if ( stream_.state == STREAM_STOPPED ) {
3449     errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3450     error( RtAudioError::WARNING );
3451     return;
3452   }
3453 
3454   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3455   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3456     if ( handle->drainCounter == 0 ) {
3457       handle->drainCounter = 2;
3458       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
3459     }
3460   }
3461 
3462   stream_.state = STREAM_STOPPED;
3463 
3464   ASIOError result = ASIOStop();
3465   if ( result != ASE_OK ) {
3466     errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3467     errorText_ = errorStream_.str();
3468   }
3469 
3470   if ( result == ASE_OK ) return;
3471   error( RtAudioError::SYSTEM_ERROR );
3472 }
3473 
3474 void RtApiAsio :: abortStream()
3475 {
3476   verifyStream();
3477   if ( stream_.state == STREAM_STOPPED ) {
3478     errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3479     error( RtAudioError::WARNING );
3480     return;
3481   }
3482 
3483   // The following lines were commented-out because some behavior was
3484   // noted where the device buffers need to be zeroed to avoid
3485   // continuing sound, even when the device buffers are completely
3486   // disposed.  So now, calling abort is the same as calling stop.
3487   // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488   // handle->drainCounter = 2;
3489   stopStream();
3490 }
3491 
3492 // This function will be called by a spawned thread when the user
3493 // callback function signals that the stream should be stopped or
3494 // aborted.  It is necessary to handle it this way because the
3495 // callbackEvent() function must return before the ASIOStop()
3496 // function will return.
3497 static unsigned __stdcall asioStopStream( void *ptr )
3498 {
3499   CallbackInfo *info = (CallbackInfo *) ptr;
3500   RtApiAsio *object = (RtApiAsio *) info->object;
3501 
3502   object->stopStream();
3503   _endthreadex( 0 );
3504   return 0;
3505 }
3506 
// Per-buffer processing, driven by the ASIO bufferSwitch callback.
// Runs the user callback (unless draining), converts/copies audio between
// the user buffers and the driver's non-interleaved channel buffers for
// half-buffer "bufferIndex", and manages the drain/stop handshake with
// stopStream().  Returns SUCCESS normally, FAILURE if called on a closed
// stream.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // Ignore callbacks that race with a stop; warn if the stream is closed.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter is incremented once per callback below; > 3 means the
  // zero-filled output buffers have cycled through the device.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() initiated the drain and is blocked on this event.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged by the driver (kAsioResyncRequest) since the
    // last callback, then clear it.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                     stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately; 1 = drain output, then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per device channel for one half-buffer of output.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user format/interleaving into the device buffer, byte-swap
      // if needed, then scatter the channels to the driver's buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: byte-swap in place (if required) and scatter
      // channels straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Bytes per device channel for one half-buffer of input.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion: gather the driver's channel buffers directly into
      // the user buffer, then byte-swap if required.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3660 
3661 static void sampleRateChanged( ASIOSampleRate sRate )
3662 {
3663   // The ASIO documentation says that this usually only happens during
3664   // external sync.  Audio processing is not stopped by the driver,
3665   // actual sample rate might not have even changed, maybe only the
3666   // sample rate status of an AES/EBU or S/PDIF digital input at the
3667   // audio device.
3668 
3669   RtApi *object = (RtApi *) asioCallbackInfo->object;
3670   try {
3671     object->stopStream();
3672   }
3673   catch ( RtAudioError &exception ) {
3674     std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3675     return;
3676   }
3677 
3678   std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3679 }
3680 
3681 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3682 {
3683   long ret = 0;
3684 
3685   switch( selector ) {
3686   case kAsioSelectorSupported:
3687     if ( value == kAsioResetRequest
3688          || value == kAsioEngineVersion
3689          || value == kAsioResyncRequest
3690          || value == kAsioLatenciesChanged
3691          // The following three were added for ASIO 2.0, you don't
3692          // necessarily have to support them.
3693          || value == kAsioSupportsTimeInfo
3694          || value == kAsioSupportsTimeCode
3695          || value == kAsioSupportsInputMonitor)
3696       ret = 1L;
3697     break;
3698   case kAsioResetRequest:
3699     // Defer the task and perform the reset of the driver during the
3700     // next "safe" situation.  You cannot reset the driver right now,
3701     // as this code is called from the driver.  Reset the driver is
3702     // done by completely destruct is. I.e. ASIOStop(),
3703     // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3704     // driver again.
3705     std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3706     ret = 1L;
3707     break;
3708   case kAsioResyncRequest:
3709     // This informs the application that the driver encountered some
3710     // non-fatal data loss.  It is used for synchronization purposes
3711     // of different media.  Added mainly to work around the Win16Mutex
3712     // problems in Windows 95/98 with the Windows Multimedia system,
3713     // which could lose data because the Mutex was held too long by
3714     // another thread.  However a driver can issue it in other
3715     // situations, too.
3716     // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3717     asioXRun = true;
3718     ret = 1L;
3719     break;
3720   case kAsioLatenciesChanged:
3721     // This will inform the host application that the drivers were
3722     // latencies changed.  Beware, it this does not mean that the
3723     // buffer sizes have changed!  You might need to update internal
3724     // delay data.
3725     std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3726     ret = 1L;
3727     break;
3728   case kAsioEngineVersion:
3729     // Return the supported ASIO version of the host application.  If
3730     // a host application does not implement this selector, ASIO 1.0
3731     // is assumed by the driver.
3732     ret = 2L;
3733     break;
3734   case kAsioSupportsTimeInfo:
3735     // Informs the driver whether the
3736     // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3737     // For compatibility with ASIO 1.0 drivers the host application
3738     // should always support the "old" bufferSwitch method, too.
3739     ret = 0;
3740     break;
3741   case kAsioSupportsTimeCode:
3742     // Informs the driver whether application is interested in time
3743     // code info.  If an application does not need to know about time
3744     // code, the driver has less work to do.
3745     ret = 0;
3746     break;
3747   }
3748   return ret;
3749 }
3750 
3751 static const char* getAsioErrorString( ASIOError result )
3752 {
3753   struct Messages
3754   {
3755     ASIOError value;
3756     const char*message;
3757   };
3758 
3759   static const Messages m[] =
3760     {
3761       {   ASE_NotPresent,    "Hardware input or output is not present or available." },
3762       {   ASE_HWMalfunction,  "Hardware is malfunctioning." },
3763       {   ASE_InvalidParameter, "Invalid input parameter." },
3764       {   ASE_InvalidMode,      "Invalid mode." },
3765       {   ASE_SPNotAdvancing,     "Sample position not advancing." },
3766       {   ASE_NoClock,            "Sample clock or rate cannot be determined or is not present." },
3767       {   ASE_NoMemory,           "Not enough memory to complete the request." }
3768     };
3769 
3770   for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3771     if ( m[i].value == result ) return m[i].message;
3772 
3773   return "Unknown error.";
3774 }
3775 
3776 //******************** End of __WINDOWS_ASIO__ *********************//
3777 #endif
3778 
3779 
3780 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3781 
3782 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3783 // - Introduces support for the Windows WASAPI API
3784 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3785 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3786 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3787 
3788 #ifndef INITGUID
3789   #define INITGUID
3790 #endif
3791 
3792 #include <mfapi.h>
3793 #include <mferror.h>
3794 #include <mfplay.h>
3795 #include <mftransform.h>
3796 #include <wmcodecdsp.h>
3797 
3798 #include <audioclient.h>
3799 #include <avrt.h>
3800 #include <mmdeviceapi.h>
3801 #include <functiondiscoverykeys_devpkey.h>
3802 
3803 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3804   #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3805 #endif
3806 
3807 #ifndef MFSTARTUP_NOSOCKET
3808   #define MFSTARTUP_NOSOCKET 0x1
3809 #endif
3810 
3811 #ifdef _MSC_VER
3812   #pragma comment( lib, "ksuser" )
3813   #pragma comment( lib, "mfplat.lib" )
3814   #pragma comment( lib, "mfuuid.lib" )
3815   #pragma comment( lib, "wmcodecdspuuid" )
3816 #endif
3817 
3818 //=============================================================================
3819 
3820 #define SAFE_RELEASE( objectPtr )\
3821 if ( objectPtr )\
3822 {\
3823   objectPtr->Release();\
3824   objectPtr = NULL;\
3825 }
3826 
3827 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3828 
3829 //-----------------------------------------------------------------------------
3830 
3831 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3832 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3833 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3834 // provide intermediate storage for read / write synchronization.
3835 class WasapiBuffer
3836 {
3837 public:
  // Construct an empty ring buffer; storage is allocated later by
  // setBufferSize().
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}
3843 
  // Release the ring-buffer storage (free(NULL) is a safe no-op).
  ~WasapiBuffer() {
    free( buffer_ );
  }
3847 
3848   // sets the length of the internal ring buffer
3849   void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3850     free( buffer_ );
3851 
3852     buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3853 
3854     bufferSize_ = bufferSize;
3855     inIndex_ = 0;
3856     outIndex_ = 0;
3857   }
3858 
  // attempt to push a buffer into the ring buffer at the current "in" index
  //
  // Copies bufferSize elements (element width selected by format) from the
  // caller's buffer into the ring, wrapping around the end when needed.
  // Returns false without copying when the incoming buffer is NULL, empty,
  // larger than the ring itself, or when the write would collide with the
  // unread region that begins at the "out" index.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "out" index so it can be compared linearly against the
    // prospective end of this write.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // the "IN" index CAN BEGIN at the "OUT" index
    // the "IN" index CANNOT END at the "OUT" index
    if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromInSize elements go at inIndex_; fromZeroSize elements wrap
    // around to the start of the ring.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // The two memcpy calls per case are the in-place segment and the
    // wrapped segment; the cast picks the element width for indexing.
    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
    }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }
3920 
  // attempt to pull a buffer from the ring buffer from the current "out" index
  //
  // buffer     - destination for the copied data
  // bufferSize - number of SAMPLES (not bytes) to pull; sample-unit indexing,
  //              mirroring pushBuffer()
  // format     - sample format; selects the element width used for indexing
  //
  // Returns false on invalid arguments or insufficient buffered data, true
  // after the data has been copied out and the "out" index advanced.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Project the "in" index into the same unwrapped coordinate space as the
    // end of this read, so the availability test below works across the wrap.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // the "OUT" index CANNOT BEGIN at the "IN" index
    // the "OUT" index CAN END at the "IN" index
    if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    // fromZeroSize: samples read from the start of the ring after wrapping;
    // fromOutSize:  samples read at the current "out" index
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    // Two-part (possibly wrapping) copy, typed per format as in pushBuffer().
    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index, wrapping modulo the ring capacity
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }
3982 
private:
  char* buffer_;             // ring storage; reinterpreted per stream format by push/pull
  unsigned int bufferSize_;  // total ring capacity, in samples
  unsigned int inIndex_;     // next write position, in samples (advanced by pushBuffer)
  unsigned int outIndex_;    // next read position, in samples (advanced by pullBuffer)
};
3989 
3990 //-----------------------------------------------------------------------------
3991 
3992 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3993 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3994 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
class WasapiResampler
{
public:
  // Wraps the Media Foundation audio resampler MFT (CLSID_CResamplerMediaObject)
  // to convert audio from inSampleRate to outSampleRate while keeping the
  // sample format and channel count unchanged.
  //   isFloat       - true for floating-point samples, false for integer PCM
  //   bitsPerSample - bits per sample (shared by input and output)
  //   channelCount  - number of interleaved channels
  // NOTE(review): the HRESULTs of the COM/MF calls in this constructor are not
  // checked; if CoCreateInstance failed, _transformUnk would remain NULL and
  // the QueryInterface call below would dereference it. Confirm this class is
  // only constructed where the resampler DSP is known to be available.
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )

    #ifdef __IWMResamplerProps_FWD_DEFINED__
      , _resamplerProps( NULL )
    #endif
  {
    // 1. Initialization

    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object

    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
      _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
      _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
    #endif

    // 3. Specify input / output format
    // _mediaType holds the attributes common to input and output; it is copied
    // into both, then the output copy gets its sample rate fields overridden.

    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
  }

  ~WasapiResampler()
  {
    // 8. Send stream stop messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // 9. Cleanup
    // NOTE(review): MFShutdown() runs before the MF objects below are
    // released — confirm this ordering is intentional.

    MFShutdown();

    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
      SAFE_RELEASE( _resamplerProps );
    #endif
  }

  // Convert inSampleCount frames from inBuffer into outBuffer.
  // On return, outSampleCount holds the number of frames actually produced
  // (0 when the transform reports MF_E_TRANSFORM_NEED_MORE_INPUT). When
  // maxOutSampleCount is not -1 it caps the intermediate output buffer size;
  // otherwise the size is estimated from the sample ratio plus one frame of
  // headroom. The caller must ensure outBuffer is large enough for the
  // produced data.
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
  {
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    // _sampleRatio is exactly 1.0f when the two rates are equal, so this is a
    // safe same-rate fast path: a straight copy with no MF round trip.
    if ( _sampleRatio == 1 )
    {
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;
      return;
    }

    unsigned int outputBufferSize = 0;
    if ( maxOutSampleCount != -1 )
    {
      outputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
    }
    else
    {
      outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
    }

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data

    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler

    _transform->ProcessInput( 0, rInSample, 0 );

    // the sample keeps its own reference; drop ours
    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion

    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
    DWORD rStatus;
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data

    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler
    // Needing more input is a normal outcome: report zero frames produced.

    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
    {
      outSampleCount = 0;
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );
      return;
    }

    // 7.3 Write output data to outBuffer

    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes ); // actual bytes produced

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  }

private:
  unsigned int _bytesPerSample; // bytes per single sample (bitsPerSample / 8)
  unsigned int _channelCount;   // interleaved channels per frame
  float _sampleRatio;           // outSampleRate / inSampleRate

  IUnknown* _transformUnk;         // raw IUnknown of the resampler MFT
  IMFTransform* _transform;        // resampler transform interface
  IMFMediaType* _mediaType;        // attributes shared by input and output
  IMFMediaType* _inputMediaType;   // input-side copy of _mediaType
  IMFMediaType* _outputMediaType;  // output-side copy with overridden rate

  #ifdef __IWMResamplerProps_FWD_DEFINED__
    IWMResamplerProps* _resamplerProps; // quality-tuning interface (optional)
  #endif
};
4189 
4190 //-----------------------------------------------------------------------------
4191 
4192 // A structure to hold various information related to the WASAPI implementation.
4193 struct WasapiHandle
4194 {
4195   IAudioClient* captureAudioClient;
4196   IAudioClient* renderAudioClient;
4197   IAudioCaptureClient* captureClient;
4198   IAudioRenderClient* renderClient;
4199   HANDLE captureEvent;
4200   HANDLE renderEvent;
4201 
4202   WasapiHandle()
4203   : captureAudioClient( NULL ),
4204     renderAudioClient( NULL ),
4205     captureClient( NULL ),
4206     renderClient( NULL ),
4207     captureEvent( NULL ),
4208     renderEvent( NULL ) {}
4209 };
4210 
4211 //=============================================================================
4212 
4213 RtApiWasapi::RtApiWasapi()
4214   : coInitialized_( false ), deviceEnumerator_( NULL )
4215 {
4216   // WASAPI can run either apartment or multi-threaded
4217   HRESULT hr = CoInitialize( NULL );
4218   if ( !FAILED( hr ) )
4219     coInitialized_ = true;
4220 
4221   // Instantiate device enumerator
4222   hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4223                          CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4224                          ( void** ) &deviceEnumerator_ );
4225 
4226   // If this runs on an old Windows, it will fail. Ignore and proceed.
4227   if ( FAILED( hr ) )
4228     deviceEnumerator_ = NULL;
4229 }
4230 
4231 //-----------------------------------------------------------------------------
4232 
RtApiWasapi::~RtApiWasapi()
{
  // Close any open stream first so its WASAPI resources are released before
  // COM is torn down below.
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
4244 
4245 //=============================================================================
4246 
// Returns the total number of active capture + render endpoints, or 0 (after
// invoking error()) if enumeration fails or no enumerator is available.
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // No enumerator (e.g. unsupported Windows version): report no devices.
  if ( !deviceEnumerator_ )
    return 0;

  // Count capture devices
  // errorText_ doubles as the success/failure flag for the goto-based cleanup
  // below: it stays empty on success.
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4296 
4297 //-----------------------------------------------------------------------------
4298 
4299 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4300 {
4301   RtAudio::DeviceInfo info;
4302   unsigned int captureDeviceCount = 0;
4303   unsigned int renderDeviceCount = 0;
4304   std::string defaultDeviceName;
4305   bool isCaptureDevice = false;
4306 
4307   PROPVARIANT deviceNameProp;
4308   PROPVARIANT defaultDeviceNameProp;
4309 
4310   IMMDeviceCollection* captureDevices = NULL;
4311   IMMDeviceCollection* renderDevices = NULL;
4312   IMMDevice* devicePtr = NULL;
4313   IMMDevice* defaultDevicePtr = NULL;
4314   IAudioClient* audioClient = NULL;
4315   IPropertyStore* devicePropStore = NULL;
4316   IPropertyStore* defaultDevicePropStore = NULL;
4317 
4318   WAVEFORMATEX* deviceFormat = NULL;
4319   WAVEFORMATEX* closestMatchFormat = NULL;
4320 
4321   // probed
4322   info.probed = false;
4323 
4324   // Count capture devices
4325   errorText_.clear();
4326   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4327   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4328   if ( FAILED( hr ) ) {
4329     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4330     goto Exit;
4331   }
4332 
4333   hr = captureDevices->GetCount( &captureDeviceCount );
4334   if ( FAILED( hr ) ) {
4335     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4336     goto Exit;
4337   }
4338 
4339   // Count render devices
4340   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4341   if ( FAILED( hr ) ) {
4342     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4343     goto Exit;
4344   }
4345 
4346   hr = renderDevices->GetCount( &renderDeviceCount );
4347   if ( FAILED( hr ) ) {
4348     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4349     goto Exit;
4350   }
4351 
4352   // validate device index
4353   if ( device >= captureDeviceCount + renderDeviceCount ) {
4354     errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4355     errorType = RtAudioError::INVALID_USE;
4356     goto Exit;
4357   }
4358 
4359   // determine whether index falls within capture or render devices
4360   if ( device >= renderDeviceCount ) {
4361     hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4362     if ( FAILED( hr ) ) {
4363       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4364       goto Exit;
4365     }
4366     isCaptureDevice = true;
4367   }
4368   else {
4369     hr = renderDevices->Item( device, &devicePtr );
4370     if ( FAILED( hr ) ) {
4371       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4372       goto Exit;
4373     }
4374     isCaptureDevice = false;
4375   }
4376 
4377   // get default device name
4378   if ( isCaptureDevice ) {
4379     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4380     if ( FAILED( hr ) ) {
4381       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4382       goto Exit;
4383     }
4384   }
4385   else {
4386     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4387     if ( FAILED( hr ) ) {
4388       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4389       goto Exit;
4390     }
4391   }
4392 
4393   hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4394   if ( FAILED( hr ) ) {
4395     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4396     goto Exit;
4397   }
4398   PropVariantInit( &defaultDeviceNameProp );
4399 
4400   hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4401   if ( FAILED( hr ) ) {
4402     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4403     goto Exit;
4404   }
4405 
4406   defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4407 
4408   // name
4409   hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4410   if ( FAILED( hr ) ) {
4411     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4412     goto Exit;
4413   }
4414 
4415   PropVariantInit( &deviceNameProp );
4416 
4417   hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4418   if ( FAILED( hr ) ) {
4419     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4420     goto Exit;
4421   }
4422 
4423   info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4424 
4425   // is default
4426   if ( isCaptureDevice ) {
4427     info.isDefaultInput = info.name == defaultDeviceName;
4428     info.isDefaultOutput = false;
4429   }
4430   else {
4431     info.isDefaultInput = false;
4432     info.isDefaultOutput = info.name == defaultDeviceName;
4433   }
4434 
4435   // channel count
4436   hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4437   if ( FAILED( hr ) ) {
4438     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4439     goto Exit;
4440   }
4441 
4442   hr = audioClient->GetMixFormat( &deviceFormat );
4443   if ( FAILED( hr ) ) {
4444     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4445     goto Exit;
4446   }
4447 
4448   if ( isCaptureDevice ) {
4449     info.inputChannels = deviceFormat->nChannels;
4450     info.outputChannels = 0;
4451     info.duplexChannels = 0;
4452   }
4453   else {
4454     info.inputChannels = 0;
4455     info.outputChannels = deviceFormat->nChannels;
4456     info.duplexChannels = 0;
4457   }
4458 
4459   // sample rates
4460   info.sampleRates.clear();
4461 
4462   // allow support for all sample rates as we have a built-in sample rate converter
4463   for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4464     info.sampleRates.push_back( SAMPLE_RATES[i] );
4465   }
4466   info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4467 
4468   // native format
4469   info.nativeFormats = 0;
4470 
4471   if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4472        ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4473          ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4474   {
4475     if ( deviceFormat->wBitsPerSample == 32 ) {
4476       info.nativeFormats |= RTAUDIO_FLOAT32;
4477     }
4478     else if ( deviceFormat->wBitsPerSample == 64 ) {
4479       info.nativeFormats |= RTAUDIO_FLOAT64;
4480     }
4481   }
4482   else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4483            ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4484              ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4485   {
4486     if ( deviceFormat->wBitsPerSample == 8 ) {
4487       info.nativeFormats |= RTAUDIO_SINT8;
4488     }
4489     else if ( deviceFormat->wBitsPerSample == 16 ) {
4490       info.nativeFormats |= RTAUDIO_SINT16;
4491     }
4492     else if ( deviceFormat->wBitsPerSample == 24 ) {
4493       info.nativeFormats |= RTAUDIO_SINT24;
4494     }
4495     else if ( deviceFormat->wBitsPerSample == 32 ) {
4496       info.nativeFormats |= RTAUDIO_SINT32;
4497     }
4498   }
4499 
4500   // probed
4501   info.probed = true;
4502 
4503 Exit:
4504   // release all references
4505   PropVariantClear( &deviceNameProp );
4506   PropVariantClear( &defaultDeviceNameProp );
4507 
4508   SAFE_RELEASE( captureDevices );
4509   SAFE_RELEASE( renderDevices );
4510   SAFE_RELEASE( devicePtr );
4511   SAFE_RELEASE( defaultDevicePtr );
4512   SAFE_RELEASE( audioClient );
4513   SAFE_RELEASE( devicePropStore );
4514   SAFE_RELEASE( defaultDevicePropStore );
4515 
4516   CoTaskMemFree( deviceFormat );
4517   CoTaskMemFree( closestMatchFormat );
4518 
4519   if ( !errorText_.empty() )
4520     error( errorType );
4521   return info;
4522 }
4523 
4524 //-----------------------------------------------------------------------------
4525 
4526 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4527 {
4528   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4529     if ( getDeviceInfo( i ).isDefaultOutput ) {
4530       return i;
4531     }
4532   }
4533 
4534   return 0;
4535 }
4536 
4537 //-----------------------------------------------------------------------------
4538 
4539 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4540 {
4541   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4542     if ( getDeviceInfo( i ).isDefaultInput ) {
4543       return i;
4544     }
4545   }
4546 
4547   return 0;
4548 }
4549 
4550 //-----------------------------------------------------------------------------
4551 
4552 void RtApiWasapi::closeStream( void )
4553 {
4554   if ( stream_.state == STREAM_CLOSED ) {
4555     errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4556     error( RtAudioError::WARNING );
4557     return;
4558   }
4559 
4560   if ( stream_.state != STREAM_STOPPED )
4561     stopStream();
4562 
4563   // clean up stream memory
4564   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4565   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4566 
4567   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4568   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4569 
4570   if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4571     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4572 
4573   if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4574     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4575 
4576   delete ( WasapiHandle* ) stream_.apiHandle;
4577   stream_.apiHandle = NULL;
4578 
4579   for ( int i = 0; i < 2; i++ ) {
4580     if ( stream_.userBuffer[i] ) {
4581       free( stream_.userBuffer[i] );
4582       stream_.userBuffer[i] = 0;
4583     }
4584   }
4585 
4586   if ( stream_.deviceBuffer ) {
4587     free( stream_.deviceBuffer );
4588     stream_.deviceBuffer = 0;
4589   }
4590 
4591   // update stream state
4592   stream_.state = STREAM_CLOSED;
4593 }
4594 
4595 //-----------------------------------------------------------------------------
4596 
void RtApiWasapi::startStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiWasapi::startStream: The stream is already running.";
    error( RtAudioError::WARNING );
    return;
  }

  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  // update stream state
  stream_.state = STREAM_RUNNING;

  // create WASAPI stream thread; created suspended so its priority can be
  // set before it begins executing
  stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );

  if ( !stream_.callbackInfo.thread ) {
    errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
    error( RtAudioError::THREAD_ERROR );
    // NOTE(review): stream_.state remains STREAM_RUNNING here even though no
    // thread was created — confirm whether it should be reset on failure.
  }
  else {
    SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
    ResumeThread( ( void* ) stream_.callbackInfo.thread );
  }
}
4626 
4627 //-----------------------------------------------------------------------------
4628 
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait (1 ms poll) until the state becomes STREAM_STOPPED — expected to be
  // set by the stream thread as it exits
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  // (bufferSize / sampleRate seconds, expressed in milliseconds)
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4659 
4660 //-----------------------------------------------------------------------------
4661 
// Identical to stopStream() except that it does not wait for the final
// buffer to finish playing before closing the thread handle.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait (1 ms poll) until the state becomes STREAM_STOPPED — expected to be
  // set by the stream thread as it exits
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4689 
4690 //-----------------------------------------------------------------------------
4691 
4692 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4693                                    unsigned int firstChannel, unsigned int sampleRate,
4694                                    RtAudioFormat format, unsigned int* bufferSize,
4695                                    RtAudio::StreamOptions* options )
4696 {
4697   bool methodResult = FAILURE;
4698   unsigned int captureDeviceCount = 0;
4699   unsigned int renderDeviceCount = 0;
4700 
4701   IMMDeviceCollection* captureDevices = NULL;
4702   IMMDeviceCollection* renderDevices = NULL;
4703   IMMDevice* devicePtr = NULL;
4704   WAVEFORMATEX* deviceFormat = NULL;
4705   unsigned int bufferBytes;
4706   stream_.state = STREAM_STOPPED;
4707 
4708   // create API Handle if not already created
4709   if ( !stream_.apiHandle )
4710     stream_.apiHandle = ( void* ) new WasapiHandle();
4711 
4712   // Count capture devices
4713   errorText_.clear();
4714   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4715   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4716   if ( FAILED( hr ) ) {
4717     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4718     goto Exit;
4719   }
4720 
4721   hr = captureDevices->GetCount( &captureDeviceCount );
4722   if ( FAILED( hr ) ) {
4723     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4724     goto Exit;
4725   }
4726 
4727   // Count render devices
4728   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4729   if ( FAILED( hr ) ) {
4730     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4731     goto Exit;
4732   }
4733 
4734   hr = renderDevices->GetCount( &renderDeviceCount );
4735   if ( FAILED( hr ) ) {
4736     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4737     goto Exit;
4738   }
4739 
4740   // validate device index
4741   if ( device >= captureDeviceCount + renderDeviceCount ) {
4742     errorType = RtAudioError::INVALID_USE;
4743     errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4744     goto Exit;
4745   }
4746 
4747   // if device index falls within capture devices
4748   if ( device >= renderDeviceCount ) {
4749     if ( mode != INPUT ) {
4750       errorType = RtAudioError::INVALID_USE;
4751       errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4752       goto Exit;
4753     }
4754 
4755     // retrieve captureAudioClient from devicePtr
4756     IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4757 
4758     hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4759     if ( FAILED( hr ) ) {
4760       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4761       goto Exit;
4762     }
4763 
4764     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4765                               NULL, ( void** ) &captureAudioClient );
4766     if ( FAILED( hr ) ) {
4767       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4768       goto Exit;
4769     }
4770 
4771     hr = captureAudioClient->GetMixFormat( &deviceFormat );
4772     if ( FAILED( hr ) ) {
4773       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4774       goto Exit;
4775     }
4776 
4777     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4778     captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4779   }
4780 
4781   // if device index falls within render devices and is configured for loopback
4782   if ( device < renderDeviceCount && mode == INPUT )
4783   {
4784     // if renderAudioClient is not initialised, initialise it now
4785     IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786     if ( !renderAudioClient )
4787     {
4788       probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4789     }
4790 
4791     // retrieve captureAudioClient from devicePtr
4792     IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4793 
4794     hr = renderDevices->Item( device, &devicePtr );
4795     if ( FAILED( hr ) ) {
4796       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4797       goto Exit;
4798     }
4799 
4800     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4801                               NULL, ( void** ) &captureAudioClient );
4802     if ( FAILED( hr ) ) {
4803       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4804       goto Exit;
4805     }
4806 
4807     hr = captureAudioClient->GetMixFormat( &deviceFormat );
4808     if ( FAILED( hr ) ) {
4809       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4810       goto Exit;
4811     }
4812 
4813     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4814     captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4815   }
4816 
4817   // if device index falls within render devices and is configured for output
4818   if ( device < renderDeviceCount && mode == OUTPUT )
4819   {
4820     // if renderAudioClient is already initialised, don't initialise it again
4821     IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4822     if ( renderAudioClient )
4823     {
4824       methodResult = SUCCESS;
4825       goto Exit;
4826     }
4827 
4828     hr = renderDevices->Item( device, &devicePtr );
4829     if ( FAILED( hr ) ) {
4830       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4831       goto Exit;
4832     }
4833 
4834     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4835                               NULL, ( void** ) &renderAudioClient );
4836     if ( FAILED( hr ) ) {
4837       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4838       goto Exit;
4839     }
4840 
4841     hr = renderAudioClient->GetMixFormat( &deviceFormat );
4842     if ( FAILED( hr ) ) {
4843       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4844       goto Exit;
4845     }
4846 
4847     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4848     renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4849   }
4850 
4851   // fill stream data
4852   if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4853        ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4854     stream_.mode = DUPLEX;
4855   }
4856   else {
4857     stream_.mode = mode;
4858   }
4859 
4860   stream_.device[mode] = device;
4861   stream_.doByteSwap[mode] = false;
4862   stream_.sampleRate = sampleRate;
4863   stream_.bufferSize = *bufferSize;
4864   stream_.nBuffers = 1;
4865   stream_.nUserChannels[mode] = channels;
4866   stream_.channelOffset[mode] = firstChannel;
4867   stream_.userFormat = format;
4868   stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4869 
4870   if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4871     stream_.userInterleaved = false;
4872   else
4873     stream_.userInterleaved = true;
4874   stream_.deviceInterleaved[mode] = true;
4875 
4876   // Set flags for buffer conversion.
4877   stream_.doConvertBuffer[mode] = false;
4878   if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4879        stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4880        stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4881     stream_.doConvertBuffer[mode] = true;
4882   else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4883             stream_.nUserChannels[mode] > 1 )
4884     stream_.doConvertBuffer[mode] = true;
4885 
4886   if ( stream_.doConvertBuffer[mode] )
4887     setConvertInfo( mode, firstChannel );
4888 
4889   // Allocate necessary internal buffers
4890   bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4891 
4892   stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4893   if ( !stream_.userBuffer[mode] ) {
4894     errorType = RtAudioError::MEMORY_ERROR;
4895     errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4896     goto Exit;
4897   }
4898 
4899   if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4900     stream_.callbackInfo.priority = 15;
4901   else
4902     stream_.callbackInfo.priority = 0;
4903 
4904   ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4905   ///! TODO: RTAUDIO_HOG_DEVICE       // Exclusive mode
4906 
4907   methodResult = SUCCESS;
4908 
4909 Exit:
4910   //clean up
4911   SAFE_RELEASE( captureDevices );
4912   SAFE_RELEASE( renderDevices );
4913   SAFE_RELEASE( devicePtr );
4914   CoTaskMemFree( deviceFormat );
4915 
4916   // if method failed, close the stream
4917   if ( methodResult == FAILURE )
4918     closeStream();
4919 
4920   if ( !errorText_.empty() )
4921     error( errorType );
4922   return methodResult;
4923 }
4924 
4925 //=============================================================================
4926 
4927 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4928 {
4929   if ( wasapiPtr )
4930     ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4931 
4932   return 0;
4933 }
4934 
4935 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4936 {
4937   if ( wasapiPtr )
4938     ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4939 
4940   return 0;
4941 }
4942 
4943 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4944 {
4945   if ( wasapiPtr )
4946     ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4947 
4948   return 0;
4949 }
4950 
4951 //-----------------------------------------------------------------------------
4952 
// Core WASAPI streaming thread. Lazily initializes the capture and/or render
// audio clients (shared mode, event-driven unless loopback), then runs the
// buffer-exchange loop: pull device audio -> resample/convert -> user
// callback -> convert/resample -> push device audio. Exits when the stream
// state becomes STREAM_STOPPING, or via goto Exit on any error; cleanup and
// error reporting happen at the Exit label.
void RtApiWasapi::wasapiThread()
{
  // as this is a new thread, we must CoInitialize it
  CoInitialize( NULL );

  HRESULT hr;

  // Pull shared COM interfaces / event handles out of the per-stream handle.
  // captureClient/renderClient and the events may still be NULL here; they
  // are created below on first run and written back into the handle.
  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

  // Device mix formats (CoTaskMemFree'd at Exit) and the ratio between the
  // device sample rate and the user-requested stream sample rate.
  WAVEFORMATEX* captureFormat = NULL;
  WAVEFORMATEX* renderFormat = NULL;
  float captureSrRatio = 0.0f;
  float renderSrRatio = 0.0f;
  WasapiBuffer captureBuffer;
  WasapiBuffer renderBuffer;
  WasapiResampler* captureResampler = NULL;
  WasapiResampler* renderResampler = NULL;

  // declare local stream variables
  RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
  BYTE* streamBuffer = NULL;
  DWORD captureFlags = 0;
  unsigned int bufferFrameCount = 0;
  unsigned int numFramesPadding = 0;
  unsigned int convBufferSize = 0;
  // Loopback capture is signalled by INPUT and OUTPUT sharing a device index.
  bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
  // callbackPushed/callbackPulled/callbackStopped track where the callback
  // buffer currently sits in the pull -> callback -> push pipeline.
  bool callbackPushed = true;
  bool callbackPulled = false;
  bool callbackStopped = false;
  int callbackResult = 0;

  // convBuffer is used to store converted buffers between WASAPI and the user
  char* convBuffer = NULL;
  unsigned int convBuffSize = 0;
  unsigned int deviceBuffSize = 0;

  // Errors are accumulated in a local string and only copied into errorText_
  // (and reported) at Exit, after cleanup has run.
  std::string errorText;
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

  // Attempt to assign "Pro Audio" characteristic to thread
  HMODULE AvrtDll = LoadLibraryW( L"AVRT.dll" );
  if ( AvrtDll ) {
    DWORD taskIndex = 0;
    TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
      ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
    // NOTE(review): the GetProcAddress result is not NULL-checked before this
    // call — assumes AVRT.dll always exports AvSetMmThreadCharacteristicsW.
    AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
    FreeLibrary( AvrtDll );
  }

  // start capture stream if applicable
  if ( captureAudioClient ) {
    hr = captureAudioClient->GetMixFormat( &captureFormat );
    if ( FAILED( hr ) ) {
      errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    // init captureResampler
    captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
                                            formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
                                            captureFormat->nSamplesPerSec, stream_.sampleRate );

    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

    // First run for this stream: initialize the client, create the capture
    // service and (unless in loopback) the event handle, then start the stream.
    if ( !captureClient ) {
      hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                           loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                           0,
                                           0,
                                           captureFormat,
                                           NULL );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
        goto Exit;
      }

      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
                                           ( void** ) &captureClient );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
        goto Exit;
      }

      // don't configure captureEvent if in loopback mode
      if ( !loopbackEnabled )
      {
        // configure captureEvent to trigger on every available capture buffer
        captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
        if ( !captureEvent ) {
          errorType = RtAudioError::SYSTEM_ERROR;
          errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
          goto Exit;
        }

        hr = captureAudioClient->SetEventHandle( captureEvent );
        if ( FAILED( hr ) ) {
          errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
          goto Exit;
        }

        ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
      }

      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

      // reset the capture stream
      hr = captureAudioClient->Reset();
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
        goto Exit;
      }

      // start the capture stream
      hr = captureAudioClient->Start();
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
        goto Exit;
      }
    }

    unsigned int inBufferSize = 0;
    hr = captureAudioClient->GetBufferSize( &inBufferSize );
    if ( FAILED( hr ) ) {
      errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
      goto Exit;
    }

    // scale outBufferSize according to stream->user sample rate ratio
    unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
    inBufferSize *= stream_.nDeviceChannels[INPUT];

    // set captureBuffer size (device buffer plus one user buffer of headroom)
    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
  }

  // start render stream if applicable
  if ( renderAudioClient ) {
    hr = renderAudioClient->GetMixFormat( &renderFormat );
    if ( FAILED( hr ) ) {
      errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    // init renderResampler
    renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
                                           formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
                                           stream_.sampleRate, renderFormat->nSamplesPerSec );

    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

    // First run for this stream: initialize the client, create the render
    // service and event handle, then start the stream.
    if ( !renderClient ) {
      hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                          0,
                                          0,
                                          renderFormat,
                                          NULL );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
        goto Exit;
      }

      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
                                          ( void** ) &renderClient );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
        goto Exit;
      }

      // configure renderEvent to trigger on every available render buffer
      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !renderEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
        goto Exit;
      }

      hr = renderAudioClient->SetEventHandle( renderEvent );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
        goto Exit;
      }

      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

      // reset the render stream
      hr = renderAudioClient->Reset();
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
        goto Exit;
      }

      // start the render stream
      hr = renderAudioClient->Start();
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
        goto Exit;
      }
    }

    unsigned int outBufferSize = 0;
    hr = renderAudioClient->GetBufferSize( &outBufferSize );
    if ( FAILED( hr ) ) {
      errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
      goto Exit;
    }

    // scale inBufferSize according to user->stream sample rate ratio
    unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
    outBufferSize *= stream_.nDeviceChannels[OUTPUT];

    // set renderBuffer size (device buffer plus one user buffer of headroom)
    renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
  }

  // malloc buffer memory: size the conversion and device buffers for the
  // larger of the two directions when running duplex.
  if ( stream_.mode == INPUT )
  {
    using namespace std; // for ceilf
    convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
  }
  else if ( stream_.mode == OUTPUT )
  {
    convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
  }
  else if ( stream_.mode == DUPLEX )
  {
    convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                             ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
    deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                               stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
  }

  convBuffSize *= 2; // allow overflow for *SrRatio remainders
  convBuffer = ( char* ) calloc( convBuffSize, 1 );
  stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
  if ( !convBuffer || !stream_.deviceBuffer ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
    goto Exit;
  }

  // stream process loop
  while ( stream_.state != STREAM_STOPPING ) {
    if ( !callbackPulled ) {
      // Callback Input
      // ==============
      // 1. Pull callback buffer from inputBuffer
      // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
      //                          Convert callback buffer to user format

      if ( captureAudioClient )
      {
        // Estimate how many device-rate samples are needed to produce one
        // user buffer after resampling.
        int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );

        convBufferSize = 0;
        while ( convBufferSize < stream_.bufferSize )
        {
          // Pull callback buffer from inputBuffer
          callbackPulled = captureBuffer.pullBuffer( convBuffer,
                                                     samplesToPull * stream_.nDeviceChannels[INPUT],
                                                     stream_.deviceFormat[INPUT] );

          if ( !callbackPulled )
          {
            break;
          }

          // Convert callback buffer to user sample rate
          unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
          unsigned int convSamples = 0;

          captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
                                     convBuffer,
                                     samplesToPull,
                                     convSamples,
                                     convBufferSize == 0 ? -1 : stream_.bufferSize - convBufferSize );

          convBufferSize += convSamples;
          samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
        }

        if ( callbackPulled )
        {
          if ( stream_.doConvertBuffer[INPUT] ) {
            // Convert callback buffer to user format
            convertBuffer( stream_.userBuffer[INPUT],
                           stream_.deviceBuffer,
                           stream_.convertInfo[INPUT] );
          }
          else {
            // no further conversion, simple copy deviceBuffer to userBuffer
            memcpy( stream_.userBuffer[INPUT],
                    stream_.deviceBuffer,
                    stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
          }
        }
      }
      else {
        // if there is no capture stream, set callbackPulled flag
        callbackPulled = true;
      }

      // Execute Callback
      // ================
      // 1. Execute user callback method
      // 2. Handle return value from callback

      // if callback has not requested the stream to stop
      if ( callbackPulled && !callbackStopped ) {
        // Execute user callback method
        callbackResult = callback( stream_.userBuffer[OUTPUT],
                                   stream_.userBuffer[INPUT],
                                   stream_.bufferSize,
                                   getStreamTime(),
                                   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
                                   stream_.callbackInfo.userData );

        // tick stream time
        RtApi::tickStreamTime();

        // Handle return value from callback: 1 = drain and stop, 2 = abort.
        // Stopping is delegated to a helper thread because stopStream()
        // would otherwise wait on this very thread.
        if ( callbackResult == 1 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
        else if ( callbackResult == 2 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
      }
    }

    // Callback Output
    // ===============
    // 1. Convert callback buffer to stream format
    // 2. Convert callback buffer to stream sample rate and channel count
    // 3. Push callback buffer into outputBuffer

    if ( renderAudioClient && callbackPulled )
    {
      // if the last call to renderBuffer.PushBuffer() was successful
      if ( callbackPushed || convBufferSize == 0 )
      {
        if ( stream_.doConvertBuffer[OUTPUT] )
        {
          // Convert callback buffer to stream format
          convertBuffer( stream_.deviceBuffer,
                         stream_.userBuffer[OUTPUT],
                         stream_.convertInfo[OUTPUT] );

        }
        else {
          // no further conversion, simple copy userBuffer to deviceBuffer
          memcpy( stream_.deviceBuffer,
                  stream_.userBuffer[OUTPUT],
                  stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
        }

        // Convert callback buffer to stream sample rate
        renderResampler->Convert( convBuffer,
                                  stream_.deviceBuffer,
                                  stream_.bufferSize,
                                  convBufferSize );
      }

      // Push callback buffer into outputBuffer; on failure the converted
      // data stays in convBuffer and the push is retried next iteration.
      callbackPushed = renderBuffer.pushBuffer( convBuffer,
                                                convBufferSize * stream_.nDeviceChannels[OUTPUT],
                                                stream_.deviceFormat[OUTPUT] );
    }
    else {
      // if there is no render stream, set callbackPushed flag
      callbackPushed = true;
    }

    // Stream Capture
    // ==============
    // 1. Get capture buffer from stream
    // 2. Push capture buffer into inputBuffer
    // 3. If 2. was successful: Release capture buffer

    if ( captureAudioClient ) {
      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
      // (in loopback mode there is no capture event, so wait on the render event instead)
      if ( !callbackPulled ) {
        WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
      }

      // Get capture buffer from stream
      hr = captureClient->GetBuffer( &streamBuffer,
                                     &bufferFrameCount,
                                     &captureFlags, NULL, NULL );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
        goto Exit;
      }

      if ( bufferFrameCount != 0 ) {
        // Push capture buffer into inputBuffer
        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
                                       stream_.deviceFormat[INPUT] ) )
        {
          // Release capture buffer
          hr = captureClient->ReleaseBuffer( bufferFrameCount );
          if ( FAILED( hr ) ) {
            errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that capture was unsuccessful
          hr = captureClient->ReleaseBuffer( 0 );
          if ( FAILED( hr ) ) {
            errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that capture was unsuccessful
        hr = captureClient->ReleaseBuffer( 0 );
        if ( FAILED( hr ) ) {
          errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
          goto Exit;
        }
      }
    }

    // Stream Render
    // =============
    // 1. Get render buffer from stream
    // 2. Pull next buffer from outputBuffer
    // 3. If 2. was successful: Fill render buffer with next buffer
    //                          Release render buffer

    if ( renderAudioClient ) {
      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
      if ( callbackPulled && !callbackPushed ) {
        WaitForSingleObject( renderEvent, INFINITE );
      }

      // Get render buffer from stream
      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
        goto Exit;
      }

      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
      if ( FAILED( hr ) ) {
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
        goto Exit;
      }

      // only the unused (non-padded) portion of the device buffer is writable
      bufferFrameCount -= numFramesPadding;

      if ( bufferFrameCount != 0 ) {
        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
        if ( FAILED( hr ) ) {
          errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
          goto Exit;
        }

        // Pull next buffer from outputBuffer
        // Fill render buffer with next buffer
        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
                                      stream_.deviceFormat[OUTPUT] ) )
        {
          // Release render buffer
          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
          if ( FAILED( hr ) ) {
            errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that render was unsuccessful
          hr = renderClient->ReleaseBuffer( 0, 0 );
          if ( FAILED( hr ) ) {
            errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that render was unsuccessful
        hr = renderClient->ReleaseBuffer( 0, 0 );
        if ( FAILED( hr ) ) {
          errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
          goto Exit;
        }
      }
    }

    // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
    if ( callbackPushed ) {
      // unsetting the callbackPulled flag lets the stream know that
      // the audio device is ready for another callback output buffer.
      callbackPulled = false;
    }

  }

Exit:
  // clean up
  CoTaskMemFree( captureFormat );
  CoTaskMemFree( renderFormat );

  free ( convBuffer );
  delete renderResampler;
  delete captureResampler;

  CoUninitialize();

  // update stream state
  stream_.state = STREAM_STOPPED;

  // report any deferred error now that cleanup is complete
  if ( !errorText.empty() )
  {
    errorText_ = errorText;
    error( errorType );
  }
}
5514 
5515 //******************** End of __WINDOWS_WASAPI__ *********************//
5516 #endif
5517 
5518 
5519 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5520 
5521 // Modified by Robin Davies, October 2005
5522 // - Improvements to DirectX pointer chasing.
5523 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5524 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5525 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5526 // Changed device query structure for RtAudio 4.0.7, January 2010
5527 
5528 #include <windows.h>
5529 #include <process.h>
5530 #include <mmsystem.h>
5531 #include <mmreg.h>
5532 #include <dsound.h>
5533 #include <assert.h>
5534 #include <algorithm>
5535 
5536 #if defined(__MINGW32__)
5537   // missing from latest mingw winapi
5538 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5539 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5540 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5541 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5542 #endif
5543 
5544 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5545 
5546 #ifdef _MSC_VER // if Microsoft Visual C++
5547 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5548 #endif
5549 
5550 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5551 {
5552   if ( pointer > bufferSize ) pointer -= bufferSize;
5553   if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5554   if ( pointer < earlierPointer ) pointer += bufferSize;
5555   return pointer >= earlierPointer && pointer < laterPointer;
5556 }
5557 
5558 // A structure to hold various information related to the DirectSound
5559 // API implementation.
5560 struct DsHandle {
5561   unsigned int drainCounter; // Tracks callback counts when draining
5562   bool internalDrain;        // Indicates if stop is initiated from callback or not.
5563   void *id[2];
5564   void *buffer[2];
5565   bool xrun[2];
5566   UINT bufferPointer[2];
5567   DWORD dsBufferSize[2];
5568   DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5569   HANDLE condition;
5570 
5571   DsHandle()
5572     :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5573 };
5574 
5575 // Declarations for utility functions, callbacks, and structures
5576 // specific to the DirectSound implementation.
5577 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5578                                           LPCTSTR description,
5579                                           LPCTSTR module,
5580                                           LPVOID lpContext );
5581 
5582 static const char* getErrorString( int code );
5583 
5584 static unsigned __stdcall callbackHandler( void *ptr );
5585 
5586 struct DsDevice {
5587   LPGUID id[2];
5588   bool validId[2];
5589   bool found;
5590   std::string name;
5591 
5592   DsDevice()
5593   : found(false) { validId[0] = false; validId[1] = false; }
5594 };
5595 
// Context passed through DirectSound(Capture)Enumerate to the device
// query callback: selects which direction is being probed and points
// at the device list to update.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // accumulated device list to fill in
};
5600 
5601 RtApiDs :: RtApiDs()
5602 {
5603   // Dsound will run both-threaded. If CoInitialize fails, then just
5604   // accept whatever the mainline chose for a threading model.
5605   coInitialized_ = false;
5606   HRESULT hr = CoInitialize( NULL );
5607   if ( !FAILED( hr ) ) coInitialized_ = true;
5608 }
5609 
5610 RtApiDs :: ~RtApiDs()
5611 {
5612   if ( stream_.state != STREAM_CLOSED ) closeStream();
5613   if ( coInitialized_ ) CoUninitialize(); // balanced call.
5614 }
5615 
5616 // The DirectSound default output is always the first device.
5617 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5618 {
5619   return 0;
5620 }
5621 
5622 // The DirectSound default input is always the first input device,
5623 // which is the first capture device enumerated.
5624 unsigned int RtApiDs :: getDefaultInputDevice( void )
5625 {
5626   return 0;
5627 }
5628 
5629 unsigned int RtApiDs :: getDeviceCount( void )
5630 {
5631   // Set query flag for previously found devices to false, so that we
5632   // can check for any devices that have disappeared.
5633   for ( unsigned int i=0; i<dsDevices.size(); i++ )
5634     dsDevices[i].found = false;
5635 
5636   // Query DirectSound devices.
5637   struct DsProbeData probeInfo;
5638   probeInfo.isInput = false;
5639   probeInfo.dsDevices = &dsDevices;
5640   HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5641   if ( FAILED( result ) ) {
5642     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5643     errorText_ = errorStream_.str();
5644     error( RtAudioError::WARNING );
5645   }
5646 
5647   // Query DirectSoundCapture devices.
5648   probeInfo.isInput = true;
5649   result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5650   if ( FAILED( result ) ) {
5651     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5652     errorText_ = errorStream_.str();
5653     error( RtAudioError::WARNING );
5654   }
5655 
5656   // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5657   for ( unsigned int i=0; i<dsDevices.size(); ) {
5658     if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5659     else i++;
5660   }
5661 
5662   return static_cast<unsigned int>(dsDevices.size());
5663 }
5664 
// Probe one device's capabilities (channel counts, sample rates, native
// data formats).  Playback capabilities are read through a temporary
// IDirectSound object and capture capabilities through a temporary
// IDirectSoundCapture object; both are released before returning.
// Failures along the way are reported as warnings and produce a
// partially filled DeviceInfo with probed == false.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // If this device has no playback GUID, skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // If there is no capture GUID, the probe is complete after playback.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // The WAVE_FORMAT_* flags encode fixed (rate, channels, width)
  // combinations, so formats and rates are derived from the same mask.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Collect rates for 16-bit first; fall back to 8-bit only formats.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // The first capture device enumerated is the DirectSound default input.
  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5843 
5844 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5845                                  unsigned int firstChannel, unsigned int sampleRate,
5846                                  RtAudioFormat format, unsigned int *bufferSize,
5847                                  RtAudio::StreamOptions *options )
5848 {
5849   if ( channels + firstChannel > 2 ) {
5850     errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5851     return FAILURE;
5852   }
5853 
5854   size_t nDevices = dsDevices.size();
5855   if ( nDevices == 0 ) {
5856     // This should not happen because a check is made before this function is called.
5857     errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5858     return FAILURE;
5859   }
5860 
5861   if ( device >= nDevices ) {
5862     // This should not happen because a check is made before this function is called.
5863     errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5864     return FAILURE;
5865   }
5866 
5867   if ( mode == OUTPUT ) {
5868     if ( dsDevices[ device ].validId[0] == false ) {
5869       errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5870       errorText_ = errorStream_.str();
5871       return FAILURE;
5872     }
5873   }
5874   else { // mode == INPUT
5875     if ( dsDevices[ device ].validId[1] == false ) {
5876       errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5877       errorText_ = errorStream_.str();
5878       return FAILURE;
5879     }
5880   }
5881 
5882   // According to a note in PortAudio, using GetDesktopWindow()
5883   // instead of GetForegroundWindow() is supposed to avoid problems
5884   // that occur when the application's window is not the foreground
5885   // window.  Also, if the application window closes before the
5886   // DirectSound buffer, DirectSound can crash.  In the past, I had
5887   // problems when using GetDesktopWindow() but it seems fine now
5888   // (January 2010).  I'll leave it commented here.
5889   // HWND hWnd = GetForegroundWindow();
5890   HWND hWnd = GetDesktopWindow();
5891 
5892   // Check the numberOfBuffers parameter and limit the lowest value to
5893   // two.  This is a judgement call and a value of two is probably too
5894   // low for capture, but it should work for playback.
5895   int nBuffers = 0;
5896   if ( options ) nBuffers = options->numberOfBuffers;
5897   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5898   if ( nBuffers < 2 ) nBuffers = 3;
5899 
5900   // Check the lower range of the user-specified buffer size and set
5901   // (arbitrarily) to a lower bound of 32.
5902   if ( *bufferSize < 32 ) *bufferSize = 32;
5903 
5904   // Create the wave format structure.  The data format setting will
5905   // be determined later.
5906   WAVEFORMATEX waveFormat;
5907   ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5908   waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5909   waveFormat.nChannels = channels + firstChannel;
5910   waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5911 
5912   // Determine the device buffer size. By default, we'll use the value
5913   // defined above (32K), but we will grow it to make allowances for
5914   // very large software buffer sizes.
5915   DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5916   DWORD dsPointerLeadTime = 0;
5917 
5918   void *ohandle = 0, *bhandle = 0;
5919   HRESULT result;
5920   if ( mode == OUTPUT ) {
5921 
5922     LPDIRECTSOUND output;
5923     result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5924     if ( FAILED( result ) ) {
5925       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5926       errorText_ = errorStream_.str();
5927       return FAILURE;
5928     }
5929 
5930     DSCAPS outCaps;
5931     outCaps.dwSize = sizeof( outCaps );
5932     result = output->GetCaps( &outCaps );
5933     if ( FAILED( result ) ) {
5934       output->Release();
5935       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5936       errorText_ = errorStream_.str();
5937       return FAILURE;
5938     }
5939 
5940     // Check channel information.
5941     if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5942       errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5943       errorText_ = errorStream_.str();
5944       return FAILURE;
5945     }
5946 
5947     // Check format information.  Use 16-bit format unless not
5948     // supported or user requests 8-bit.
5949     if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5950          !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5951       waveFormat.wBitsPerSample = 16;
5952       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5953     }
5954     else {
5955       waveFormat.wBitsPerSample = 8;
5956       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5957     }
5958     stream_.userFormat = format;
5959 
5960     // Update wave format structure and buffer information.
5961     waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5962     waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5963     dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5964 
5965     // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5966     while ( dsPointerLeadTime * 2U > dsBufferSize )
5967       dsBufferSize *= 2;
5968 
5969     // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5970     // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5971     // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5972     result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5973     if ( FAILED( result ) ) {
5974       output->Release();
5975       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5976       errorText_ = errorStream_.str();
5977       return FAILURE;
5978     }
5979 
5980     // Even though we will write to the secondary buffer, we need to
5981     // access the primary buffer to set the correct output format
5982     // (since the default is 8-bit, 22 kHz!).  Setup the DS primary
5983     // buffer description.
5984     DSBUFFERDESC bufferDescription;
5985     ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5986     bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5987     bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5988 
5989     // Obtain the primary buffer
5990     LPDIRECTSOUNDBUFFER buffer;
5991     result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5992     if ( FAILED( result ) ) {
5993       output->Release();
5994       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5995       errorText_ = errorStream_.str();
5996       return FAILURE;
5997     }
5998 
5999     // Set the primary DS buffer sound format.
6000     result = buffer->SetFormat( &waveFormat );
6001     if ( FAILED( result ) ) {
6002       output->Release();
6003       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6004       errorText_ = errorStream_.str();
6005       return FAILURE;
6006     }
6007 
6008     // Setup the secondary DS buffer description.
6009     ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6010     bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6011     bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6012                                   DSBCAPS_GLOBALFOCUS |
6013                                   DSBCAPS_GETCURRENTPOSITION2 |
6014                                   DSBCAPS_LOCHARDWARE );  // Force hardware mixing
6015     bufferDescription.dwBufferBytes = dsBufferSize;
6016     bufferDescription.lpwfxFormat = &waveFormat;
6017 
6018     // Try to create the secondary DS buffer.  If that doesn't work,
6019     // try to use software mixing.  Otherwise, there's a problem.
6020     result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6021     if ( FAILED( result ) ) {
6022       bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6023                                     DSBCAPS_GLOBALFOCUS |
6024                                     DSBCAPS_GETCURRENTPOSITION2 |
6025                                     DSBCAPS_LOCSOFTWARE );  // Force software mixing
6026       result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6027       if ( FAILED( result ) ) {
6028         output->Release();
6029         errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6030         errorText_ = errorStream_.str();
6031         return FAILURE;
6032       }
6033     }
6034 
6035     // Get the buffer size ... might be different from what we specified.
6036     DSBCAPS dsbcaps;
6037     dsbcaps.dwSize = sizeof( DSBCAPS );
6038     result = buffer->GetCaps( &dsbcaps );
6039     if ( FAILED( result ) ) {
6040       output->Release();
6041       buffer->Release();
6042       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6043       errorText_ = errorStream_.str();
6044       return FAILURE;
6045     }
6046 
6047     dsBufferSize = dsbcaps.dwBufferBytes;
6048 
6049     // Lock the DS buffer
6050     LPVOID audioPtr;
6051     DWORD dataLen;
6052     result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6053     if ( FAILED( result ) ) {
6054       output->Release();
6055       buffer->Release();
6056       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6057       errorText_ = errorStream_.str();
6058       return FAILURE;
6059     }
6060 
6061     // Zero the DS buffer
6062     ZeroMemory( audioPtr, dataLen );
6063 
6064     // Unlock the DS buffer
6065     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6066     if ( FAILED( result ) ) {
6067       output->Release();
6068       buffer->Release();
6069       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6070       errorText_ = errorStream_.str();
6071       return FAILURE;
6072     }
6073 
6074     ohandle = (void *) output;
6075     bhandle = (void *) buffer;
6076   }
6077 
6078   if ( mode == INPUT ) {
6079 
6080     LPDIRECTSOUNDCAPTURE input;
6081     result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6082     if ( FAILED( result ) ) {
6083       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6084       errorText_ = errorStream_.str();
6085       return FAILURE;
6086     }
6087 
6088     DSCCAPS inCaps;
6089     inCaps.dwSize = sizeof( inCaps );
6090     result = input->GetCaps( &inCaps );
6091     if ( FAILED( result ) ) {
6092       input->Release();
6093       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6094       errorText_ = errorStream_.str();
6095       return FAILURE;
6096     }
6097 
6098     // Check channel information.
6099     if ( inCaps.dwChannels < channels + firstChannel ) {
6100       errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6101       return FAILURE;
6102     }
6103 
6104     // Check format information.  Use 16-bit format unless user
6105     // requests 8-bit.
6106     DWORD deviceFormats;
6107     if ( channels + firstChannel == 2 ) {
6108       deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6109       if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6110         waveFormat.wBitsPerSample = 8;
6111         stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6112       }
6113       else { // assume 16-bit is supported
6114         waveFormat.wBitsPerSample = 16;
6115         stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6116       }
6117     }
6118     else { // channel == 1
6119       deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6120       if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6121         waveFormat.wBitsPerSample = 8;
6122         stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6123       }
6124       else { // assume 16-bit is supported
6125         waveFormat.wBitsPerSample = 16;
6126         stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6127       }
6128     }
6129     stream_.userFormat = format;
6130 
6131     // Update wave format structure and buffer information.
6132     waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6133     waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6134     dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6135 
6136     // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6137     while ( dsPointerLeadTime * 2U > dsBufferSize )
6138       dsBufferSize *= 2;
6139 
6140     // Setup the secondary DS buffer description.
6141     DSCBUFFERDESC bufferDescription;
6142     ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6143     bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6144     bufferDescription.dwFlags = 0;
6145     bufferDescription.dwReserved = 0;
6146     bufferDescription.dwBufferBytes = dsBufferSize;
6147     bufferDescription.lpwfxFormat = &waveFormat;
6148 
6149     // Create the capture buffer.
6150     LPDIRECTSOUNDCAPTUREBUFFER buffer;
6151     result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6152     if ( FAILED( result ) ) {
6153       input->Release();
6154       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6155       errorText_ = errorStream_.str();
6156       return FAILURE;
6157     }
6158 
6159     // Get the buffer size ... might be different from what we specified.
6160     DSCBCAPS dscbcaps;
6161     dscbcaps.dwSize = sizeof( DSCBCAPS );
6162     result = buffer->GetCaps( &dscbcaps );
6163     if ( FAILED( result ) ) {
6164       input->Release();
6165       buffer->Release();
6166       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6167       errorText_ = errorStream_.str();
6168       return FAILURE;
6169     }
6170 
6171     dsBufferSize = dscbcaps.dwBufferBytes;
6172 
6173     // NOTE: We could have a problem here if this is a duplex stream
6174     // and the play and capture hardware buffer sizes are different
6175     // (I'm actually not sure if that is a problem or not).
6176     // Currently, we are not verifying that.
6177 
6178     // Lock the capture buffer
6179     LPVOID audioPtr;
6180     DWORD dataLen;
6181     result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6182     if ( FAILED( result ) ) {
6183       input->Release();
6184       buffer->Release();
6185       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6186       errorText_ = errorStream_.str();
6187       return FAILURE;
6188     }
6189 
6190     // Zero the buffer
6191     ZeroMemory( audioPtr, dataLen );
6192 
6193     // Unlock the buffer
6194     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6195     if ( FAILED( result ) ) {
6196       input->Release();
6197       buffer->Release();
6198       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6199       errorText_ = errorStream_.str();
6200       return FAILURE;
6201     }
6202 
6203     ohandle = (void *) input;
6204     bhandle = (void *) buffer;
6205   }
6206 
6207   // Set various stream parameters
6208   DsHandle *handle = 0;
6209   stream_.nDeviceChannels[mode] = channels + firstChannel;
6210   stream_.nUserChannels[mode] = channels;
6211   stream_.bufferSize = *bufferSize;
6212   stream_.channelOffset[mode] = firstChannel;
6213   stream_.deviceInterleaved[mode] = true;
6214   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6215   else stream_.userInterleaved = true;
6216 
6217   // Set flag for buffer conversion
6218   stream_.doConvertBuffer[mode] = false;
6219   if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6220     stream_.doConvertBuffer[mode] = true;
6221   if (stream_.userFormat != stream_.deviceFormat[mode])
6222     stream_.doConvertBuffer[mode] = true;
6223   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6224        stream_.nUserChannels[mode] > 1 )
6225     stream_.doConvertBuffer[mode] = true;
6226 
6227   // Allocate necessary internal buffers
6228   long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6229   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6230   if ( stream_.userBuffer[mode] == NULL ) {
6231     errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6232     goto error;
6233   }
6234 
6235   if ( stream_.doConvertBuffer[mode] ) {
6236 
6237     bool makeBuffer = true;
6238     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6239     if ( mode == INPUT ) {
6240       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6241         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6242         if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6243       }
6244     }
6245 
6246     if ( makeBuffer ) {
6247       bufferBytes *= *bufferSize;
6248       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6249       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6250       if ( stream_.deviceBuffer == NULL ) {
6251         errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6252         goto error;
6253       }
6254     }
6255   }
6256 
6257   // Allocate our DsHandle structures for the stream.
6258   if ( stream_.apiHandle == 0 ) {
6259     try {
6260       handle = new DsHandle;
6261     }
6262     catch ( std::bad_alloc& ) {
6263       errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6264       goto error;
6265     }
6266 
6267     // Create a manual-reset event.
6268     handle->condition = CreateEvent( NULL,   // no security
6269                                      TRUE,   // manual-reset
6270                                      FALSE,  // non-signaled initially
6271                                      NULL ); // unnamed
6272     stream_.apiHandle = (void *) handle;
6273   }
6274   else
6275     handle = (DsHandle *) stream_.apiHandle;
6276   handle->id[mode] = ohandle;
6277   handle->buffer[mode] = bhandle;
6278   handle->dsBufferSize[mode] = dsBufferSize;
6279   handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6280 
6281   stream_.device[mode] = device;
6282   stream_.state = STREAM_STOPPED;
6283   if ( stream_.mode == OUTPUT && mode == INPUT )
6284     // We had already set up an output stream.
6285     stream_.mode = DUPLEX;
6286   else
6287     stream_.mode = mode;
6288   stream_.nBuffers = nBuffers;
6289   stream_.sampleRate = sampleRate;
6290 
6291   // Setup the buffer conversion information structure.
6292   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6293 
6294   // Setup the callback thread.
6295   if ( stream_.callbackInfo.isRunning == false ) {
6296     unsigned threadId;
6297     stream_.callbackInfo.isRunning = true;
6298     stream_.callbackInfo.object = (void *) this;
6299     stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6300                                                   &stream_.callbackInfo, 0, &threadId );
6301     if ( stream_.callbackInfo.thread == 0 ) {
6302       errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6303       goto error;
6304     }
6305 
6306     // Boost DS thread priority
6307     SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6308   }
6309   return SUCCESS;
6310 
6311  error:
6312   if ( handle ) {
6313     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6314       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6315       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6316       if ( buffer ) buffer->Release();
6317       object->Release();
6318     }
6319     if ( handle->buffer[1] ) {
6320       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6321       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6322       if ( buffer ) buffer->Release();
6323       object->Release();
6324     }
6325     CloseHandle( handle->condition );
6326     delete handle;
6327     stream_.apiHandle = 0;
6328   }
6329 
6330   for ( int i=0; i<2; i++ ) {
6331     if ( stream_.userBuffer[i] ) {
6332       free( stream_.userBuffer[i] );
6333       stream_.userBuffer[i] = 0;
6334     }
6335   }
6336 
6337   if ( stream_.deviceBuffer ) {
6338     free( stream_.deviceBuffer );
6339     stream_.deviceBuffer = 0;
6340   }
6341 
6342   stream_.state = STREAM_CLOSED;
6343   return FAILURE;
6344 }
6345 
// Close the open stream: terminate the callback thread, stop and release
// the DirectSound playback/capture objects and buffers, and free all
// internal stream memory.  Teardown order matters: the callback thread is
// joined first so nothing touches the buffers while they are released.
void RtApiDs :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiDs::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // Stop the callback thread.  callbackHandler() polls isRunning and exits
  // its loop once the flag clears; wait for the thread to finish before
  // tearing down the resources it uses.
  stream_.callbackInfo.isRunning = false;
  WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
  CloseHandle( (HANDLE) stream_.callbackInfo.thread );

  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( handle ) {
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
      // Playback side: stop and release the sound buffer before releasing
      // the DirectSound object that created it.
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
      if ( buffer ) {
        buffer->Stop();
        buffer->Release();
      }
      object->Release();
    }
    if ( handle->buffer[1] ) {
      // Capture side: same stop/release ordering as playback.
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
      if ( buffer ) {
        buffer->Stop();
        buffer->Release();
      }
      object->Release();
    }
    // Release the manual-reset event created in probeDeviceOpen().
    CloseHandle( handle->condition );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free the user-side conversion buffers (input and output).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the shared device-format buffer, if one was allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
6399 
6400 void RtApiDs :: startStream()
6401 {
6402   verifyStream();
6403   if ( stream_.state == STREAM_RUNNING ) {
6404     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6405     error( RtAudioError::WARNING );
6406     return;
6407   }
6408 
6409   #if defined( HAVE_GETTIMEOFDAY )
6410   gettimeofday( &stream_.lastTickTimestamp, NULL );
6411   #endif
6412 
6413   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6414 
6415   // Increase scheduler frequency on lesser windows (a side-effect of
6416   // increasing timer accuracy).  On greater windows (Win2K or later),
6417   // this is already in effect.
6418   timeBeginPeriod( 1 );
6419 
6420   buffersRolling = false;
6421   duplexPrerollBytes = 0;
6422 
6423   if ( stream_.mode == DUPLEX ) {
6424     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6425     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6426   }
6427 
6428   HRESULT result = 0;
6429   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6430 
6431     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6432     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6433     if ( FAILED( result ) ) {
6434       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6435       errorText_ = errorStream_.str();
6436       goto unlock;
6437     }
6438   }
6439 
6440   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6441 
6442     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6443     result = buffer->Start( DSCBSTART_LOOPING );
6444     if ( FAILED( result ) ) {
6445       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6446       errorText_ = errorStream_.str();
6447       goto unlock;
6448     }
6449   }
6450 
6451   handle->drainCounter = 0;
6452   handle->internalDrain = false;
6453   ResetEvent( handle->condition );
6454   stream_.state = STREAM_RUNNING;
6455 
6456  unlock:
6457   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6458 }
6459 
// Stop the running stream.  For output, optionally waits for the buffer to
// drain (drainCounter == 0 means a normal stop, so we block on the
// condition event until the callback signals the drain is complete), then
// stops the DirectSound buffer and zeros it so a restart doesn't replay
// stale audio.  Locking note: the output branch takes the stream mutex; in
// DUPLEX mode the input branch relies on that same lock (hence the
// "mode != DUPLEX" guard), and the single MUTEX_UNLOCK at the end releases it.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      // Normal stop: ask the callback to drain (counter = 2 skips the user
      // callback and writes zeros), then wait until it signals completion.
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already taken in the output branch above.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6563 
6564 void RtApiDs :: abortStream()
6565 {
6566   verifyStream();
6567   if ( stream_.state == STREAM_STOPPED ) {
6568     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6569     error( RtAudioError::WARNING );
6570     return;
6571   }
6572 
6573   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6574   handle->drainCounter = 2;
6575 
6576   stopStream();
6577 }
6578 
6579 void RtApiDs :: callbackEvent()
6580 {
6581   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6582     Sleep( 50 ); // sleep 50 milliseconds
6583     return;
6584   }
6585 
6586   if ( stream_.state == STREAM_CLOSED ) {
6587     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6588     error( RtAudioError::WARNING );
6589     return;
6590   }
6591 
6592   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6593   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6594 
6595   // Check if we were draining the stream and signal is finished.
6596   if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6597 
6598     stream_.state = STREAM_STOPPING;
6599     if ( handle->internalDrain == false )
6600       SetEvent( handle->condition );
6601     else
6602       stopStream();
6603     return;
6604   }
6605 
6606   // Invoke user callback to get fresh output data UNLESS we are
6607   // draining stream.
6608   if ( handle->drainCounter == 0 ) {
6609     RtAudioCallback callback = (RtAudioCallback) info->callback;
6610     double streamTime = getStreamTime();
6611     RtAudioStreamStatus status = 0;
6612     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6613       status |= RTAUDIO_OUTPUT_UNDERFLOW;
6614       handle->xrun[0] = false;
6615     }
6616     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6617       status |= RTAUDIO_INPUT_OVERFLOW;
6618       handle->xrun[1] = false;
6619     }
6620     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6621                                   stream_.bufferSize, streamTime, status, info->userData );
6622     if ( cbReturnValue == 2 ) {
6623       stream_.state = STREAM_STOPPING;
6624       handle->drainCounter = 2;
6625       abortStream();
6626       return;
6627     }
6628     else if ( cbReturnValue == 1 ) {
6629       handle->drainCounter = 1;
6630       handle->internalDrain = true;
6631     }
6632   }
6633 
6634   HRESULT result;
6635   DWORD currentWritePointer, safeWritePointer;
6636   DWORD currentReadPointer, safeReadPointer;
6637   UINT nextWritePointer;
6638 
6639   LPVOID buffer1 = NULL;
6640   LPVOID buffer2 = NULL;
6641   DWORD bufferSize1 = 0;
6642   DWORD bufferSize2 = 0;
6643 
6644   char *buffer;
6645   long bufferBytes;
6646 
6647   MUTEX_LOCK( &stream_.mutex );
6648   if ( stream_.state == STREAM_STOPPED ) {
6649     MUTEX_UNLOCK( &stream_.mutex );
6650     return;
6651   }
6652 
6653   if ( buffersRolling == false ) {
6654     if ( stream_.mode == DUPLEX ) {
6655       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6656 
6657       // It takes a while for the devices to get rolling. As a result,
6658       // there's no guarantee that the capture and write device pointers
6659       // will move in lockstep.  Wait here for both devices to start
6660       // rolling, and then set our buffer pointers accordingly.
6661       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6662       // bytes later than the write buffer.
6663 
6664       // Stub: a serious risk of having a pre-emptive scheduling round
6665       // take place between the two GetCurrentPosition calls... but I'm
6666       // really not sure how to solve the problem.  Temporarily boost to
6667       // Realtime priority, maybe; but I'm not sure what priority the
6668       // DirectSound service threads run at. We *should* be roughly
6669       // within a ms or so of correct.
6670 
6671       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6672       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6673 
6674       DWORD startSafeWritePointer, startSafeReadPointer;
6675 
6676       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6677       if ( FAILED( result ) ) {
6678         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6679         errorText_ = errorStream_.str();
6680         MUTEX_UNLOCK( &stream_.mutex );
6681         error( RtAudioError::SYSTEM_ERROR );
6682         return;
6683       }
6684       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6685       if ( FAILED( result ) ) {
6686         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6687         errorText_ = errorStream_.str();
6688         MUTEX_UNLOCK( &stream_.mutex );
6689         error( RtAudioError::SYSTEM_ERROR );
6690         return;
6691       }
6692       while ( true ) {
6693         result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6694         if ( FAILED( result ) ) {
6695           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6696           errorText_ = errorStream_.str();
6697           MUTEX_UNLOCK( &stream_.mutex );
6698           error( RtAudioError::SYSTEM_ERROR );
6699           return;
6700         }
6701         result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6702         if ( FAILED( result ) ) {
6703           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6704           errorText_ = errorStream_.str();
6705           MUTEX_UNLOCK( &stream_.mutex );
6706           error( RtAudioError::SYSTEM_ERROR );
6707           return;
6708         }
6709         if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6710         Sleep( 1 );
6711       }
6712 
6713       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6714 
6715       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6716       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6717       handle->bufferPointer[1] = safeReadPointer;
6718     }
6719     else if ( stream_.mode == OUTPUT ) {
6720 
6721       // Set the proper nextWritePosition after initial startup.
6722       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6723       result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6724       if ( FAILED( result ) ) {
6725         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6726         errorText_ = errorStream_.str();
6727         MUTEX_UNLOCK( &stream_.mutex );
6728         error( RtAudioError::SYSTEM_ERROR );
6729         return;
6730       }
6731       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6732       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6733     }
6734 
6735     buffersRolling = true;
6736   }
6737 
6738   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6739 
6740     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6741 
6742     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6743       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6744       bufferBytes *= formatBytes( stream_.userFormat );
6745       memset( stream_.userBuffer[0], 0, bufferBytes );
6746     }
6747 
6748     // Setup parameters and do buffer conversion if necessary.
6749     if ( stream_.doConvertBuffer[0] ) {
6750       buffer = stream_.deviceBuffer;
6751       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6752       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6753       bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6754     }
6755     else {
6756       buffer = stream_.userBuffer[0];
6757       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6758       bufferBytes *= formatBytes( stream_.userFormat );
6759     }
6760 
6761     // No byte swapping necessary in DirectSound implementation.
6762 
6763     // Ahhh ... windoze.  16-bit data is signed but 8-bit data is
6764     // unsigned.  So, we need to convert our signed 8-bit data here to
6765     // unsigned.
6766     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6767       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6768 
6769     DWORD dsBufferSize = handle->dsBufferSize[0];
6770     nextWritePointer = handle->bufferPointer[0];
6771 
6772     DWORD endWrite, leadPointer;
6773     while ( true ) {
6774       // Find out where the read and "safe write" pointers are.
6775       result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6776       if ( FAILED( result ) ) {
6777         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6778         errorText_ = errorStream_.str();
6779         MUTEX_UNLOCK( &stream_.mutex );
6780         error( RtAudioError::SYSTEM_ERROR );
6781         return;
6782       }
6783 
6784       // We will copy our output buffer into the region between
6785       // safeWritePointer and leadPointer.  If leadPointer is not
6786       // beyond the next endWrite position, wait until it is.
6787       leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6788       //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6789       if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6790       if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6791       endWrite = nextWritePointer + bufferBytes;
6792 
6793       // Check whether the entire write region is behind the play pointer.
6794       if ( leadPointer >= endWrite ) break;
6795 
6796       // If we are here, then we must wait until the leadPointer advances
6797       // beyond the end of our next write region. We use the
6798       // Sleep() function to suspend operation until that happens.
6799       double millis = ( endWrite - leadPointer ) * 1000.0;
6800       millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6801       if ( millis < 1.0 ) millis = 1.0;
6802       Sleep( (DWORD) millis );
6803     }
6804 
6805     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6806          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6807       // We've strayed into the forbidden zone ... resync the read pointer.
6808       handle->xrun[0] = true;
6809       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6810       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6811       handle->bufferPointer[0] = nextWritePointer;
6812       endWrite = nextWritePointer + bufferBytes;
6813     }
6814 
6815     // Lock free space in the buffer
6816     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6817                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6818     if ( FAILED( result ) ) {
6819       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6820       errorText_ = errorStream_.str();
6821       MUTEX_UNLOCK( &stream_.mutex );
6822       error( RtAudioError::SYSTEM_ERROR );
6823       return;
6824     }
6825 
6826     // Copy our buffer into the DS buffer
6827     CopyMemory( buffer1, buffer, bufferSize1 );
6828     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6829 
6830     // Update our buffer offset and unlock sound buffer
6831     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6832     if ( FAILED( result ) ) {
6833       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6834       errorText_ = errorStream_.str();
6835       MUTEX_UNLOCK( &stream_.mutex );
6836       error( RtAudioError::SYSTEM_ERROR );
6837       return;
6838     }
6839     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6840     handle->bufferPointer[0] = nextWritePointer;
6841   }
6842 
6843   // Don't bother draining input
6844   if ( handle->drainCounter ) {
6845     handle->drainCounter++;
6846     goto unlock;
6847   }
6848 
6849   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6850 
6851     // Setup parameters.
6852     if ( stream_.doConvertBuffer[1] ) {
6853       buffer = stream_.deviceBuffer;
6854       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6855       bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6856     }
6857     else {
6858       buffer = stream_.userBuffer[1];
6859       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6860       bufferBytes *= formatBytes( stream_.userFormat );
6861     }
6862 
6863     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6864     long nextReadPointer = handle->bufferPointer[1];
6865     DWORD dsBufferSize = handle->dsBufferSize[1];
6866 
6867     // Find out where the write and "safe read" pointers are.
6868     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6869     if ( FAILED( result ) ) {
6870       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6871       errorText_ = errorStream_.str();
6872       MUTEX_UNLOCK( &stream_.mutex );
6873       error( RtAudioError::SYSTEM_ERROR );
6874       return;
6875     }
6876 
6877     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6878     DWORD endRead = nextReadPointer + bufferBytes;
6879 
6880     // Handling depends on whether we are INPUT or DUPLEX.
6881     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6882     // then a wait here will drag the write pointers into the forbidden zone.
6883     //
6884     // In DUPLEX mode, rather than wait, we will back off the read pointer until
6885     // it's in a safe position. This causes dropouts, but it seems to be the only
6886     // practical way to sync up the read and write pointers reliably, given the
6887     // the very complex relationship between phase and increment of the read and write
6888     // pointers.
6889     //
6890     // In order to minimize audible dropouts in DUPLEX mode, we will
6891     // provide a pre-roll period of 0.5 seconds in which we return
6892     // zeros from the read buffer while the pointers sync up.
6893 
6894     if ( stream_.mode == DUPLEX ) {
6895       if ( safeReadPointer < endRead ) {
6896         if ( duplexPrerollBytes <= 0 ) {
6897           // Pre-roll time over. Be more agressive.
6898           int adjustment = endRead-safeReadPointer;
6899 
6900           handle->xrun[1] = true;
6901           // Two cases:
6902           //   - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6903           //     and perform fine adjustments later.
6904           //   - small adjustments: back off by twice as much.
6905           if ( adjustment >= 2*bufferBytes )
6906             nextReadPointer = safeReadPointer-2*bufferBytes;
6907           else
6908             nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6909 
6910           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6911 
6912         }
6913         else {
6914           // In pre=roll time. Just do it.
6915           nextReadPointer = safeReadPointer - bufferBytes;
6916           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6917         }
6918         endRead = nextReadPointer + bufferBytes;
6919       }
6920     }
6921     else { // mode == INPUT
6922       while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6923         // See comments for playback.
6924         double millis = (endRead - safeReadPointer) * 1000.0;
6925         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6926         if ( millis < 1.0 ) millis = 1.0;
6927         Sleep( (DWORD) millis );
6928 
6929         // Wake up and find out where we are now.
6930         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6931         if ( FAILED( result ) ) {
6932           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6933           errorText_ = errorStream_.str();
6934           MUTEX_UNLOCK( &stream_.mutex );
6935           error( RtAudioError::SYSTEM_ERROR );
6936           return;
6937         }
6938 
6939         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6940       }
6941     }
6942 
6943     // Lock free space in the buffer
6944     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6945                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6946     if ( FAILED( result ) ) {
6947       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6948       errorText_ = errorStream_.str();
6949       MUTEX_UNLOCK( &stream_.mutex );
6950       error( RtAudioError::SYSTEM_ERROR );
6951       return;
6952     }
6953 
6954     if ( duplexPrerollBytes <= 0 ) {
6955       // Copy our buffer into the DS buffer
6956       CopyMemory( buffer, buffer1, bufferSize1 );
6957       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6958     }
6959     else {
6960       memset( buffer, 0, bufferSize1 );
6961       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6962       duplexPrerollBytes -= bufferSize1 + bufferSize2;
6963     }
6964 
6965     // Update our buffer offset and unlock sound buffer
6966     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6967     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6968     if ( FAILED( result ) ) {
6969       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6970       errorText_ = errorStream_.str();
6971       MUTEX_UNLOCK( &stream_.mutex );
6972       error( RtAudioError::SYSTEM_ERROR );
6973       return;
6974     }
6975     handle->bufferPointer[1] = nextReadPointer;
6976 
6977     // No byte swapping necessary in DirectSound implementation.
6978 
6979     // If necessary, convert 8-bit data from unsigned to signed.
6980     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6981       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6982 
6983     // Do buffer conversion if necessary.
6984     if ( stream_.doConvertBuffer[1] )
6985       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6986   }
6987 
6988  unlock:
6989   MUTEX_UNLOCK( &stream_.mutex );
6990   RtApi::tickStreamTime();
6991 }
6992 
6993 // Definitions for utility functions and callbacks
6994 // specific to the DirectSound implementation.
6995 
6996 static unsigned __stdcall callbackHandler( void *ptr )
6997 {
6998   CallbackInfo *info = (CallbackInfo *) ptr;
6999   RtApiDs *object = (RtApiDs *) info->object;
7000   bool* isRunning = &info->isRunning;
7001 
7002   while ( *isRunning == true ) {
7003     object->callbackEvent();
7004   }
7005 
7006   _endthreadex( 0 );
7007   return 0;
7008 }
7009 
// DirectSoundEnumerate / DirectSoundCaptureEnumerate callback.  Invoked once
// per system device; validates the device by briefly opening it, then records
// its name and GUID in the shared DsDevice list carried in lpContext.  Always
// returns TRUE so enumeration continues to the next device.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR /*module*/,
                                          LPVOID lpContext )
{
  struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
  std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;

  HRESULT hr;
  bool validDevice = false;
  if ( probeInfo.isInput == true ) {
    // Capture device: open it and require at least one channel and one
    // supported format before accepting it.
    DSCCAPS caps;
    LPDIRECTSOUNDCAPTURE object;

    hr = DirectSoundCaptureCreate(  lpguid, &object,   NULL );
    if ( hr != DS_OK ) return TRUE; // can't open ... skip, keep enumerating

    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
        validDevice = true;
    }
    object->Release(); // release the temporary COM object in every path
  }
  else {
    // Playback device: accept it if it supports a mono or stereo primary buffer.
    DSCAPS caps;
    LPDIRECTSOUND object;
    hr = DirectSoundCreate(  lpguid, &object,   NULL );
    if ( hr != DS_OK ) return TRUE; // can't open ... skip, keep enumerating

    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
        validDevice = true;
    }
    object->Release(); // release the temporary COM object in every path
  }

  // If good device, then save its name and guid.
  std::string name = convertCharPointerToStdString( description );
  //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
  // A NULL GUID marks the system default device.
  if ( lpguid == NULL )
    name = "Default Device";
  if ( validDevice ) {
    // If a device with this name was seen in an earlier enumeration pass,
    // update its entry in place: id[1]/validId[1] hold the capture GUID,
    // id[0]/validId[0] the playback GUID.
    for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
      if ( dsDevices[i].name == name ) {
        dsDevices[i].found = true;
        if ( probeInfo.isInput ) {
          dsDevices[i].id[1] = lpguid;
          dsDevices[i].validId[1] = true;
        }
        else {
          dsDevices[i].id[0] = lpguid;
          dsDevices[i].validId[0] = true;
        }
        return TRUE;
      }
    }

    // First time we see this name ... append a new entry.
    DsDevice device;
    device.name = name;
    device.found = true;
    if ( probeInfo.isInput ) {
      device.id[1] = lpguid;
      device.validId[1] = true;
    }
    else {
      device.id[0] = lpguid;
      device.validId[0] = true;
    }
    dsDevices.push_back( device );
  }

  return TRUE;
}
7087 
7088 static const char* getErrorString( int code )
7089 {
7090   switch ( code ) {
7091 
7092   case DSERR_ALLOCATED:
7093     return "Already allocated";
7094 
7095   case DSERR_CONTROLUNAVAIL:
7096     return "Control unavailable";
7097 
7098   case DSERR_INVALIDPARAM:
7099     return "Invalid parameter";
7100 
7101   case DSERR_INVALIDCALL:
7102     return "Invalid call";
7103 
7104   case DSERR_GENERIC:
7105     return "Generic error";
7106 
7107   case DSERR_PRIOLEVELNEEDED:
7108     return "Priority level needed";
7109 
7110   case DSERR_OUTOFMEMORY:
7111     return "Out of memory";
7112 
7113   case DSERR_BADFORMAT:
7114     return "The sample rate or the channel format is not supported";
7115 
7116   case DSERR_UNSUPPORTED:
7117     return "Not supported";
7118 
7119   case DSERR_NODRIVER:
7120     return "No driver";
7121 
7122   case DSERR_ALREADYINITIALIZED:
7123     return "Already initialized";
7124 
7125   case DSERR_NOAGGREGATION:
7126     return "No aggregation";
7127 
7128   case DSERR_BUFFERLOST:
7129     return "Buffer lost";
7130 
7131   case DSERR_OTHERAPPHASPRIO:
7132     return "Another application already has priority";
7133 
7134   case DSERR_UNINITIALIZED:
7135     return "Uninitialized";
7136 
7137   default:
7138     return "DirectSound unknown error";
7139   }
7140 }
7141 //******************** End of __WINDOWS_DS__ *********************//
7142 #endif
7143 
7144 
7145 #if defined(__LINUX_ALSA__)
7146 
7147 #include <alsa/asoundlib.h>
7148 #include <unistd.h>
7149 
7150   // A structure to hold various information related to the ALSA API
7151   // implementation.
7152 struct AlsaHandle {
7153   snd_pcm_t *handles[2];
7154   bool synchronized;
7155   bool xrun[2];
7156   pthread_cond_t runnable_cv;
7157   bool runnable;
7158 
7159   AlsaHandle()
7160 #if _cplusplus >= 201103L
7161     :handles{nullptr, nullptr}, synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7162 #else
7163     : synchronized(false), runnable(false) { handles[0] = NULL; handles[1] = NULL; xrun[0] = false; xrun[1] = false; }
7164 #endif
7165 };
7166 
7167 static void *alsaCallbackHandler( void * ptr );
7168 
// Default constructor: all state lives in the RtApi base class, so no
// ALSA-specific initialization is required here.
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
7173 
7174 RtApiAlsa :: ~RtApiAlsa()
7175 {
7176   if ( stream_.state != STREAM_CLOSED ) closeStream();
7177 }
7178 
7179 unsigned int RtApiAlsa :: getDeviceCount( void )
7180 {
7181   unsigned nDevices = 0;
7182   int result, subdevice, card;
7183   char name[64];
7184   snd_ctl_t *handle = 0;
7185 
7186   strcpy(name, "default");
7187   result = snd_ctl_open( &handle, "default", 0 );
7188   if (result == 0) {
7189     nDevices++;
7190     snd_ctl_close( handle );
7191   }
7192 
7193   // Count cards and devices
7194   card = -1;
7195   snd_card_next( &card );
7196   while ( card >= 0 ) {
7197     sprintf( name, "hw:%d", card );
7198     result = snd_ctl_open( &handle, name, 0 );
7199     if ( result < 0 ) {
7200       handle = 0;
7201       errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7202       errorText_ = errorStream_.str();
7203       error( RtAudioError::WARNING );
7204       goto nextcard;
7205     }
7206     subdevice = -1;
7207     while( 1 ) {
7208       result = snd_ctl_pcm_next_device( handle, &subdevice );
7209       if ( result < 0 ) {
7210         errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7211         errorText_ = errorStream_.str();
7212         error( RtAudioError::WARNING );
7213         break;
7214       }
7215       if ( subdevice < 0 )
7216         break;
7217       nDevices++;
7218     }
7219   nextcard:
7220     if ( handle )
7221         snd_ctl_close( handle );
7222     snd_card_next( &card );
7223   }
7224 
7225   return nDevices;
7226 }
7227 
// Probe ALSA device `device` (index into the enumeration produced by
// getDeviceCount(): "default" first, then hw cards/subdevices) and fill an
// RtAudio::DeviceInfo with its channel counts, supported sample rates,
// native data formats and name.  On any failure a warning is issued and
// the partially-filled info (info.probed == false unless the full probe
// completed) is returned.  Control flow is goto-based: the enumeration
// jumps to foundDevice, then falls through playback probe -> captureProbe
// -> probeParameters.
RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  unsigned nDevices = 0;
  int result=-1, subdevice=-1, card=-1;
  char name[64];
  snd_ctl_t *chandle = 0;

  // Device 0 is the "default" device when its control interface opens.
  // Note: on the goto below, chandle is intentionally left open for the
  // probes that follow; subdevice stays -1 to mark the default device.
  result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
  if ( result == 0 ) {
    if ( nDevices++ == device ) {
      strcpy( name, "default" );
      goto foundDevice;
    }
  }
  if ( chandle )
    snd_ctl_close( chandle );

  // Count cards and devices
  snd_card_next( &card );
  while ( card >= 0 ) {
    sprintf( name, "hw:%d", card );
    result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
    if ( result < 0 ) {
      chandle = 0;
      errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
      goto nextcard;
    }
    subdevice = -1;
    while( 1 ) {
      result = snd_ctl_pcm_next_device( chandle, &subdevice );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        error( RtAudioError::WARNING );
        break;
      }
      if ( subdevice < 0 ) break;
      if ( nDevices == device ) {
        // Found it ... chandle stays open for the probes below.
        sprintf( name, "hw:%d,%d", card, subdevice );
        goto foundDevice;
      }
      nDevices++;
    }
  nextcard:
    if ( chandle )
        snd_ctl_close( chandle );
    snd_card_next( &card );
  }

  if ( nDevices == 0 ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

 foundDevice:

  // If a stream is already open, we cannot probe the stream devices.
  // Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED &&
       ( stream_.device[0] == device || stream_.device[1] == device ) ) {
    snd_ctl_close( chandle );
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  int openMode = SND_PCM_ASYNC;
  snd_pcm_stream_t stream;
  snd_pcm_info_t *pcminfo;
  snd_pcm_info_alloca( &pcminfo );
  snd_pcm_t *phandle;
  snd_pcm_hw_params_t *params;
  snd_pcm_hw_params_alloca( &params );

  // First try for playback unless default device (which has subdev -1)
  stream = SND_PCM_STREAM_PLAYBACK;
  snd_pcm_info_set_stream( pcminfo, stream );
  if ( subdevice != -1 ) {
    snd_pcm_info_set_device( pcminfo, subdevice );
    snd_pcm_info_set_subdevice( pcminfo, 0 );

    result = snd_ctl_pcm_info( chandle, pcminfo );
    if ( result < 0 ) {
      // Device probably doesn't support playback.
      goto captureProbe;
    }
  }

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }

  // Get output channel information.
  unsigned int value;
  result = snd_pcm_hw_params_get_channels_max( params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }
  info.outputChannels = value;
  snd_pcm_close( phandle );

 captureProbe:
  stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  // Now try for capture unless default device (with subdev = -1)
  // Either branch closes chandle; it is not used past this point.
  if ( subdevice != -1 ) {
    result = snd_ctl_pcm_info( chandle, pcminfo );
    snd_ctl_close( chandle );
    if ( result < 0 ) {
      // Device probably doesn't support capture.
      if ( info.outputChannels == 0 ) return info;
      goto probeParameters;
    }
  }
  else
    snd_ctl_close( chandle );

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }

  result = snd_pcm_hw_params_get_channels_max( params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }
  info.inputChannels = value;
  snd_pcm_close( phandle );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // ALSA doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

 probeParameters:
  // At this point, we just need to figure out the supported data
  // formats and sample rates.  We'll proceed by opening the device in
  // the direction with the maximum number of channels, or playback if
  // they are equal.  This might limit our sample rate options, but so
  // be it.

  if ( info.outputChannels >= info.inputChannels )
    stream = SND_PCM_STREAM_PLAYBACK;
  else
    stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Test our discrete set of sample rate values.
  // The preferred rate is the highest supported rate <= 48000 Hz
  // (or the first supported rate if none qualifies).
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
    }
  }
  if ( info.sampleRates.size() == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported data formats ... we don't care about endian-ness just yet
  snd_pcm_format_t format;
  info.nativeFormats = 0;
  format = SND_PCM_FORMAT_S8;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT8;
  format = SND_PCM_FORMAT_S16;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT16;
  format = SND_PCM_FORMAT_S24;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT24;
  format = SND_PCM_FORMAT_S32;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT32;
  format = SND_PCM_FORMAT_FLOAT;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  format = SND_PCM_FORMAT_FLOAT64;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT64;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get the device name
  // For hw devices, replace the card number with the card's name
  // (e.g. "hw:0,0" becomes "hw:<cardname>,0"); failures fall back to
  // the numeric form.
  if (strncmp(name, "default", 7)!=0) {
    char *cardname;
    result = snd_card_get_name( card, &cardname );
    if ( result >= 0 ) {
      sprintf( name, "hw:%s,%d", cardname, subdevice );
      free( cardname );
    }
  }
  info.name = name;

  // That's all ... close the device and return
  snd_pcm_close( phandle );
  info.probed = true;
  return info;
}
7517 
7518 void RtApiAlsa :: saveDeviceInfo( void )
7519 {
7520   devices_.clear();
7521 
7522   unsigned int nDevices = getDeviceCount();
7523   devices_.resize( nDevices );
7524   for ( unsigned int i=0; i<nDevices; i++ )
7525     devices_[i] = getDeviceInfo( i );
7526 }
7527 
7528 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7529                                    unsigned int firstChannel, unsigned int sampleRate,
7530                                    RtAudioFormat format, unsigned int *bufferSize,
7531                                    RtAudio::StreamOptions *options )
7532 
7533 {
7534 #if defined(__RTAUDIO_DEBUG__)
7535   struct SndOutputTdealloc {
7536     SndOutputTdealloc() : _out(NULL) { snd_output_stdio_attach(&_out, stderr, 0); }
7537     ~SndOutputTdealloc() { snd_output_close(_out); }
7538     operator snd_output_t*() { return _out; }
7539     snd_output_t *_out;
7540   } out;
7541 #endif
7542 
7543   // I'm not using the "plug" interface ... too much inconsistent behavior.
7544 
7545   unsigned nDevices = 0;
7546   int result, subdevice, card;
7547   char name[64];
7548   snd_ctl_t *chandle;
7549 
7550   if ( device == 0
7551        || (options && options->flags & RTAUDIO_ALSA_USE_DEFAULT) )
7552   {
7553     strcpy(name, "default");
7554     result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7555     if ( result == 0 ) {
7556       if ( nDevices == device ) {
7557         strcpy( name, "default" );
7558         snd_ctl_close( chandle );
7559         goto foundDevice;
7560       }
7561       nDevices++;
7562     }
7563   }
7564 
7565   else {
7566     nDevices++;
7567     // Count cards and devices
7568     card = -1;
7569     snd_card_next( &card );
7570     while ( card >= 0 ) {
7571       sprintf( name, "hw:%d", card );
7572       result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7573       if ( result < 0 ) {
7574         errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7575         errorText_ = errorStream_.str();
7576         return FAILURE;
7577       }
7578       subdevice = -1;
7579       while( 1 ) {
7580         result = snd_ctl_pcm_next_device( chandle, &subdevice );
7581         if ( result < 0 ) break;
7582         if ( subdevice < 0 ) break;
7583         if ( nDevices == device ) {
7584           sprintf( name, "hw:%d,%d", card, subdevice );
7585           snd_ctl_close( chandle );
7586           goto foundDevice;
7587         }
7588         nDevices++;
7589       }
7590       snd_ctl_close( chandle );
7591       snd_card_next( &card );
7592     }
7593 
7594     if ( nDevices == 0 ) {
7595       // This should not happen because a check is made before this function is called.
7596       errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7597       return FAILURE;
7598     }
7599 
7600     if ( device >= nDevices ) {
7601       // This should not happen because a check is made before this function is called.
7602       errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7603       return FAILURE;
7604     }
7605   }
7606 
7607  foundDevice:
7608 
7609   // The getDeviceInfo() function will not work for a device that is
7610   // already open.  Thus, we'll probe the system before opening a
7611   // stream and save the results for use by getDeviceInfo().
7612   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7613     this->saveDeviceInfo();
7614 
7615   snd_pcm_stream_t stream;
7616   if ( mode == OUTPUT )
7617     stream = SND_PCM_STREAM_PLAYBACK;
7618   else
7619     stream = SND_PCM_STREAM_CAPTURE;
7620 
7621   snd_pcm_t *phandle;
7622   int openMode = SND_PCM_ASYNC;
7623   result = snd_pcm_open( &phandle, name, stream, openMode );
7624   if ( result < 0 ) {
7625     if ( mode == OUTPUT )
7626       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7627     else
7628       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7629     errorText_ = errorStream_.str();
7630     return FAILURE;
7631   }
7632 
7633   // Fill the parameter structure.
7634   snd_pcm_hw_params_t *hw_params;
7635   snd_pcm_hw_params_alloca( &hw_params );
7636   result = snd_pcm_hw_params_any( phandle, hw_params );
7637   if ( result < 0 ) {
7638     snd_pcm_close( phandle );
7639     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7640     errorText_ = errorStream_.str();
7641     return FAILURE;
7642   }
7643 
7644 #if defined(__RTAUDIO_DEBUG__)
7645   fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7646   snd_pcm_hw_params_dump( hw_params, out );
7647 #endif
7648 
7649   // Set access ... check user preference.
7650   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7651     stream_.userInterleaved = false;
7652     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7653     if ( result < 0 ) {
7654       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7655       stream_.deviceInterleaved[mode] =  true;
7656     }
7657     else
7658       stream_.deviceInterleaved[mode] = false;
7659   }
7660   else {
7661     stream_.userInterleaved = true;
7662     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7663     if ( result < 0 ) {
7664       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7665       stream_.deviceInterleaved[mode] =  false;
7666     }
7667     else
7668       stream_.deviceInterleaved[mode] =  true;
7669   }
7670 
7671   if ( result < 0 ) {
7672     snd_pcm_close( phandle );
7673     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7674     errorText_ = errorStream_.str();
7675     return FAILURE;
7676   }
7677 
7678   // Determine how to set the device format.
7679   stream_.userFormat = format;
7680   snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7681 
7682   if ( format == RTAUDIO_SINT8 )
7683     deviceFormat = SND_PCM_FORMAT_S8;
7684   else if ( format == RTAUDIO_SINT16 )
7685     deviceFormat = SND_PCM_FORMAT_S16;
7686   else if ( format == RTAUDIO_SINT24 )
7687     deviceFormat = SND_PCM_FORMAT_S24;
7688   else if ( format == RTAUDIO_SINT32 )
7689     deviceFormat = SND_PCM_FORMAT_S32;
7690   else if ( format == RTAUDIO_FLOAT32 )
7691     deviceFormat = SND_PCM_FORMAT_FLOAT;
7692   else if ( format == RTAUDIO_FLOAT64 )
7693     deviceFormat = SND_PCM_FORMAT_FLOAT64;
7694 
7695   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7696     stream_.deviceFormat[mode] = format;
7697     goto setFormat;
7698   }
7699 
7700   // The user requested format is not natively supported by the device.
7701   deviceFormat = SND_PCM_FORMAT_FLOAT64;
7702   if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7703     stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7704     goto setFormat;
7705   }
7706 
7707   deviceFormat = SND_PCM_FORMAT_FLOAT;
7708   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7709     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7710     goto setFormat;
7711   }
7712 
7713   deviceFormat = SND_PCM_FORMAT_S32;
7714   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7715     stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7716     goto setFormat;
7717   }
7718 
7719   deviceFormat = SND_PCM_FORMAT_S24;
7720   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7721     stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7722     goto setFormat;
7723   }
7724 
7725   deviceFormat = SND_PCM_FORMAT_S16;
7726   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7727     stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7728     goto setFormat;
7729   }
7730 
7731   deviceFormat = SND_PCM_FORMAT_S8;
7732   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7733     stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7734     goto setFormat;
7735   }
7736 
7737   // If we get here, no supported format was found.
7738   snd_pcm_close( phandle );
7739   errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7740   errorText_ = errorStream_.str();
7741   return FAILURE;
7742 
7743  setFormat:
7744   result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7745   if ( result < 0 ) {
7746     snd_pcm_close( phandle );
7747     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7748     errorText_ = errorStream_.str();
7749     return FAILURE;
7750   }
7751 
7752   // Determine whether byte-swaping is necessary.
7753   stream_.doByteSwap[mode] = false;
7754   if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7755     result = snd_pcm_format_cpu_endian( deviceFormat );
7756     if ( result == 0 )
7757       stream_.doByteSwap[mode] = true;
7758     else if (result < 0) {
7759       snd_pcm_close( phandle );
7760       errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7761       errorText_ = errorStream_.str();
7762       return FAILURE;
7763     }
7764   }
7765 
7766   // Set the sample rate.
7767   result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7768   if ( result < 0 ) {
7769     snd_pcm_close( phandle );
7770     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7771     errorText_ = errorStream_.str();
7772     return FAILURE;
7773   }
7774 
7775   // Determine the number of channels for this device.  We support a possible
7776   // minimum device channel number > than the value requested by the user.
7777   stream_.nUserChannels[mode] = channels;
7778   unsigned int value;
7779   result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7780   unsigned int deviceChannels = value;
7781   if ( result < 0 || deviceChannels < channels + firstChannel ) {
7782     snd_pcm_close( phandle );
7783     errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7784     errorText_ = errorStream_.str();
7785     return FAILURE;
7786   }
7787 
7788   result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7789   if ( result < 0 ) {
7790     snd_pcm_close( phandle );
7791     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7792     errorText_ = errorStream_.str();
7793     return FAILURE;
7794   }
7795   deviceChannels = value;
7796   if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7797   stream_.nDeviceChannels[mode] = deviceChannels;
7798 
7799   // Set the device channels.
7800   result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7801   if ( result < 0 ) {
7802     snd_pcm_close( phandle );
7803     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7804     errorText_ = errorStream_.str();
7805     return FAILURE;
7806   }
7807 
7808   // Set the buffer (or period) size.
7809   int dir = 0;
7810   snd_pcm_uframes_t periodSize = *bufferSize;
7811   result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7812   if ( result < 0 ) {
7813     snd_pcm_close( phandle );
7814     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7815     errorText_ = errorStream_.str();
7816     return FAILURE;
7817   }
7818   *bufferSize = periodSize;
7819 
7820   // Set the buffer number, which in ALSA is referred to as the "period".
7821   unsigned int periods = 0;
7822   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7823   if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7824   if ( periods < 2 ) periods = 4; // a fairly safe default value
7825   result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7826   if ( result < 0 ) {
7827     snd_pcm_close( phandle );
7828     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7829     errorText_ = errorStream_.str();
7830     return FAILURE;
7831   }
7832 
7833   // If attempting to setup a duplex stream, the bufferSize parameter
7834   // MUST be the same in both directions!
7835   if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7836     snd_pcm_close( phandle );
7837     errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7838     errorText_ = errorStream_.str();
7839     return FAILURE;
7840   }
7841 
7842   stream_.bufferSize = *bufferSize;
7843 
7844   // Install the hardware configuration
7845   result = snd_pcm_hw_params( phandle, hw_params );
7846   if ( result < 0 ) {
7847     snd_pcm_close( phandle );
7848     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7849     errorText_ = errorStream_.str();
7850     return FAILURE;
7851   }
7852 
7853 #if defined(__RTAUDIO_DEBUG__)
7854   fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7855   snd_pcm_hw_params_dump( hw_params, out );
7856 #endif
7857 
7858   // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7859   snd_pcm_sw_params_t *sw_params = NULL;
7860   snd_pcm_sw_params_alloca( &sw_params );
7861   snd_pcm_sw_params_current( phandle, sw_params );
7862   snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7863   snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7864   snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7865 
7866   // The following two settings were suggested by Theo Veenker
7867   //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7868   //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7869 
7870   // here are two options for a fix
7871   //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7872   snd_pcm_uframes_t val;
7873   snd_pcm_sw_params_get_boundary( sw_params, &val );
7874   snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7875 
7876   result = snd_pcm_sw_params( phandle, sw_params );
7877   if ( result < 0 ) {
7878     snd_pcm_close( phandle );
7879     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7880     errorText_ = errorStream_.str();
7881     return FAILURE;
7882   }
7883 
7884 #if defined(__RTAUDIO_DEBUG__)
7885   fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7886   snd_pcm_sw_params_dump( sw_params, out );
7887 #endif
7888 
7889   // Set flags for buffer conversion
7890   stream_.doConvertBuffer[mode] = false;
7891   if ( stream_.userFormat != stream_.deviceFormat[mode] )
7892     stream_.doConvertBuffer[mode] = true;
7893   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7894     stream_.doConvertBuffer[mode] = true;
7895   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7896        stream_.nUserChannels[mode] > 1 )
7897     stream_.doConvertBuffer[mode] = true;
7898 
7899   // Allocate the ApiHandle if necessary and then save.
7900   AlsaHandle *apiInfo = 0;
7901   if ( stream_.apiHandle == 0 ) {
7902     try {
7903       apiInfo = (AlsaHandle *) new AlsaHandle;
7904     }
7905     catch ( std::bad_alloc& ) {
7906       errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7907       goto error;
7908     }
7909 
7910     if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7911       errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7912       goto error;
7913     }
7914 
7915     stream_.apiHandle = (void *) apiInfo;
7916     apiInfo->handles[0] = 0;
7917     apiInfo->handles[1] = 0;
7918   }
7919   else {
7920     apiInfo = (AlsaHandle *) stream_.apiHandle;
7921   }
7922   apiInfo->handles[mode] = phandle;
7923   phandle = 0;
7924 
7925   // Allocate necessary internal buffers.
7926   unsigned long bufferBytes;
7927   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7928   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7929   if ( stream_.userBuffer[mode] == NULL ) {
7930     errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7931     goto error;
7932   }
7933 
7934   if ( stream_.doConvertBuffer[mode] ) {
7935 
7936     bool makeBuffer = true;
7937     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7938     if ( mode == INPUT ) {
7939       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7940         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7941         if ( bufferBytes <= bytesOut ) makeBuffer = false;
7942       }
7943     }
7944 
7945     if ( makeBuffer ) {
7946       bufferBytes *= *bufferSize;
7947       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7948       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7949       if ( stream_.deviceBuffer == NULL ) {
7950         errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7951         goto error;
7952       }
7953     }
7954   }
7955 
7956   stream_.sampleRate = sampleRate;
7957   stream_.nBuffers = periods;
7958   stream_.device[mode] = device;
7959   stream_.state = STREAM_STOPPED;
7960 
7961   // Setup the buffer conversion information structure.
7962   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7963 
7964   // Setup thread if necessary.
7965   if ( stream_.mode == OUTPUT && mode == INPUT ) {
7966     // We had already set up an output stream.
7967     stream_.mode = DUPLEX;
7968     // Link the streams if possible.
7969     apiInfo->synchronized = false;
7970     if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7971       apiInfo->synchronized = true;
7972     else {
7973       errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7974       error( RtAudioError::WARNING );
7975     }
7976   }
7977   else {
7978     stream_.mode = mode;
7979 
7980     // Setup callback thread.
7981     stream_.callbackInfo.object = (void *) this;
7982 
7983     // Set the thread attributes for joinable and realtime scheduling
7984     // priority (optional).  The higher priority will only take affect
7985     // if the program is run as root or suid. Note, under Linux
7986     // processes with CAP_SYS_NICE privilege, a user can change
7987     // scheduling policy and priority (thus need not be root). See
7988     // POSIX "capabilities".
7989     pthread_attr_t attr;
7990     pthread_attr_init( &attr );
7991     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7992 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7993     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7994       stream_.callbackInfo.doRealtime = true;
7995       struct sched_param param;
7996       int priority = options->priority;
7997       int min = sched_get_priority_min( SCHED_RR );
7998       int max = sched_get_priority_max( SCHED_RR );
7999       if ( priority < min ) priority = min;
8000       else if ( priority > max ) priority = max;
8001       param.sched_priority = priority;
8002 
8003       // Set the policy BEFORE the priority. Otherwise it fails.
8004       pthread_attr_setschedpolicy(&attr, SCHED_RR);
8005       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8006       // This is definitely required. Otherwise it fails.
8007       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8008       pthread_attr_setschedparam(&attr, &param);
8009     }
8010     else
8011       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8012 #else
8013     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8014 #endif
8015 
8016     stream_.callbackInfo.isRunning = true;
8017     result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8018     pthread_attr_destroy( &attr );
8019     if ( result ) {
8020       // Failed. Try instead with default attributes.
8021       result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8022       if ( result ) {
8023         stream_.callbackInfo.isRunning = false;
8024         errorText_ = "RtApiAlsa::error creating callback thread!";
8025         goto error;
8026       }
8027     }
8028   }
8029 
8030   return SUCCESS;
8031 
8032  error:
8033   if ( apiInfo ) {
8034     pthread_cond_destroy( &apiInfo->runnable_cv );
8035     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8036     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8037     delete apiInfo;
8038     stream_.apiHandle = 0;
8039   }
8040 
8041   if ( phandle) snd_pcm_close( phandle );
8042 
8043   for ( int i=0; i<2; i++ ) {
8044     if ( stream_.userBuffer[i] ) {
8045       free( stream_.userBuffer[i] );
8046       stream_.userBuffer[i] = 0;
8047     }
8048   }
8049 
8050   if ( stream_.deviceBuffer ) {
8051     free( stream_.deviceBuffer );
8052     stream_.deviceBuffer = 0;
8053   }
8054 
8055   stream_.state = STREAM_CLOSED;
8056   return FAILURE;
8057 }
8058 
8059 void RtApiAlsa :: closeStream()
8060 {
8061   if ( stream_.state == STREAM_CLOSED ) {
8062     errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8063     error( RtAudioError::WARNING );
8064     return;
8065   }
8066 
8067   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8068   stream_.callbackInfo.isRunning = false;
8069   MUTEX_LOCK( &stream_.mutex );
8070   if ( stream_.state == STREAM_STOPPED ) {
8071     apiInfo->runnable = true;
8072     pthread_cond_signal( &apiInfo->runnable_cv );
8073   }
8074   MUTEX_UNLOCK( &stream_.mutex );
8075   pthread_join( stream_.callbackInfo.thread, NULL );
8076 
8077   if ( stream_.state == STREAM_RUNNING ) {
8078     stream_.state = STREAM_STOPPED;
8079     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8080       snd_pcm_drop( apiInfo->handles[0] );
8081     if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8082       snd_pcm_drop( apiInfo->handles[1] );
8083   }
8084 
8085   if ( apiInfo ) {
8086     pthread_cond_destroy( &apiInfo->runnable_cv );
8087     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8088     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8089     delete apiInfo;
8090     stream_.apiHandle = 0;
8091   }
8092 
8093   for ( int i=0; i<2; i++ ) {
8094     if ( stream_.userBuffer[i] ) {
8095       free( stream_.userBuffer[i] );
8096       stream_.userBuffer[i] = 0;
8097     }
8098   }
8099 
8100   if ( stream_.deviceBuffer ) {
8101     free( stream_.deviceBuffer );
8102     stream_.deviceBuffer = 0;
8103   }
8104 
8105   stream_.mode = UNINITIALIZED;
8106   stream_.state = STREAM_CLOSED;
8107 }
8108 
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  #if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point so elapsed time is measured from now.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  // Prepare the playback pcm (handles[0]) unless ALSA already reports it prepared.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // Prepare the capture pcm (handles[1]); when the two pcms were linked at
  // open time (synchronized), the output side covers input as well.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Always wake the callback thread (it waits on runnable_cv while stopped),
  // even on failure, so it can re-check the stream state.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
8165 
8166 void RtApiAlsa :: stopStream()
8167 {
8168   verifyStream();
8169   if ( stream_.state == STREAM_STOPPED ) {
8170     errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8171     error( RtAudioError::WARNING );
8172     return;
8173   }
8174 
8175   stream_.state = STREAM_STOPPED;
8176   MUTEX_LOCK( &stream_.mutex );
8177 
8178   int result = 0;
8179   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8180   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8181   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8182     if ( apiInfo->synchronized )
8183       result = snd_pcm_drop( handle[0] );
8184     else
8185       result = snd_pcm_drain( handle[0] );
8186     if ( result < 0 ) {
8187       errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8188       errorText_ = errorStream_.str();
8189       goto unlock;
8190     }
8191   }
8192 
8193   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8194     result = snd_pcm_drop( handle[1] );
8195     if ( result < 0 ) {
8196       errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8197       errorText_ = errorStream_.str();
8198       goto unlock;
8199     }
8200   }
8201 
8202  unlock:
8203   apiInfo->runnable = false; // fixes high CPU usage when stopped
8204   MUTEX_UNLOCK( &stream_.mutex );
8205 
8206   if ( result >= 0 ) return;
8207   error( RtAudioError::SYSTEM_ERROR );
8208 }
8209 
8210 void RtApiAlsa :: abortStream()
8211 {
8212   verifyStream();
8213   if ( stream_.state == STREAM_STOPPED ) {
8214     errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8215     error( RtAudioError::WARNING );
8216     return;
8217   }
8218 
8219   stream_.state = STREAM_STOPPED;
8220   MUTEX_LOCK( &stream_.mutex );
8221 
8222   int result = 0;
8223   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8224   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8225   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8226     result = snd_pcm_drop( handle[0] );
8227     if ( result < 0 ) {
8228       errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8229       errorText_ = errorStream_.str();
8230       goto unlock;
8231     }
8232   }
8233 
8234   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8235     result = snd_pcm_drop( handle[1] );
8236     if ( result < 0 ) {
8237       errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8238       errorText_ = errorStream_.str();
8239       goto unlock;
8240     }
8241   }
8242 
8243  unlock:
8244   apiInfo->runnable = false; // fixes high CPU usage when stopped
8245   MUTEX_UNLOCK( &stream_.mutex );
8246 
8247   if ( result >= 0 ) return;
8248   error( RtAudioError::SYSTEM_ERROR );
8249 }
8250 
void RtApiAlsa :: callbackEvent()
{
  // One audio cycle: wait while the stream is stopped, invoke the user
  // callback, then transfer one buffer to/from the ALSA pcm(s).
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    // Park here until startStream() or closeStream() signals runnable_cv.
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report any xruns recorded by the I/O code below to the user callback,
  // then clear the flags.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // Callback return value 2 requests an immediate abort (drop pending frames).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      // Capture into the device buffer first; converted to the user
      // format/layout further below.
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one channel-pointer per contiguous plane.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the pcm.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Input failed, but still attempt the output half of a duplex stream.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one channel-pointer per contiguous plane.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next callback and re-prepare the pcm.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ =  "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream-time counter; callback return value 1 requests a
  // graceful stop (drain queued output).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
8438 
8439 static void *alsaCallbackHandler( void *ptr )
8440 {
8441   CallbackInfo *info = (CallbackInfo *) ptr;
8442   RtApiAlsa *object = (RtApiAlsa *) info->object;
8443   bool *isRunning = &info->isRunning;
8444 
8445 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8446   if ( info->doRealtime ) {
8447     std::cerr << "RtAudio alsa: " <<
8448              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8449              "running realtime scheduling" << std::endl;
8450   }
8451 #endif
8452 
8453   while ( *isRunning == true ) {
8454     pthread_testcancel();
8455     object->callbackEvent();
8456   }
8457 
8458   pthread_exit( NULL );
8459 }
8460 
8461 //******************** End of __LINUX_ALSA__ *********************//
8462 #endif
8463 
8464 #if defined(__LINUX_PULSE__)
8465 
8466 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8467 // and Tristan Matthews.
8468 
8469 #include <pulse/error.h>
8470 #include <pulse/simple.h>
8471 #include <pulse/pulseaudio.h>
8472 #include <cstdio>
8473 
// Mainloop API pointer set by collectDeviceInfo(); used by the callbacks
// below to terminate the enumeration mainloop.
static pa_mainloop_api *rt_pa_mainloop_api = NULL;
// One entry per physical device, merging its PulseAudio sink (output) and
// source (input) halves into a single RtAudio device record.
struct PaDeviceInfo {
  PaDeviceInfo() : sink_index(-1), source_index(-1) {}
  int sink_index;    // PulseAudio sink index; -1 when no sink half was found
  int source_index;  // PulseAudio source index; -1 when no source half was found
  std::string sink_name;
  std::string source_name;
  RtAudio::DeviceInfo info;  // the RtAudio-facing device description
};
// Global enumeration state filled in by the server/sink/source callbacks
// while collectDeviceInfo() runs its blocking mainloop.
static struct {
  std::vector<PaDeviceInfo> dev;
  std::string default_sink_name;
  std::string default_source_name;
  int default_rate;
} rt_pa_info;
8489 
// Sample rates advertised for every PulseAudio device (the server resamples
// as needed); the list is zero-terminated for iteration.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Maps RtAudio sample formats to their PulseAudio equivalents.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Terminated by a {0, PA_SAMPLE_INVALID} sentinel for iteration.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT24, PA_SAMPLE_S24LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8504 
// Per-stream state for the PulseAudio backend: one simple-API connection per
// direction plus the callback thread and its wakeup condition variable.
struct PulseAudioHandle {
  pa_simple *s_play;  // playback connection (0 until opened)
  pa_simple *s_rec;   // capture connection (0 until opened)
  pthread_t thread;   // callback thread handle
  pthread_cond_t runnable_cv;  // signaled to wake the callback thread when stopped
  bool runnable;
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8513 
8514 static void rt_pa_mainloop_api_quit(int ret) {
8515     rt_pa_mainloop_api->quit(rt_pa_mainloop_api, ret);
8516 }
8517 
8518 static void rt_pa_server_callback(pa_context *context, const pa_server_info *info, void *data){
8519   (void)context;
8520   (void)data;
8521   pa_sample_spec ss;
8522 
8523   if (!info)
8524     rt_pa_mainloop_api_quit(1);
8525 
8526   ss = info->sample_spec;
8527 
8528   rt_pa_info.default_rate = ss.rate;
8529   rt_pa_info.default_sink_name = info->default_sink_name;
8530   rt_pa_info.default_source_name = info->default_source_name;
8531   rt_pa_mainloop_api_quit(0);
8532 }
8533 
8534 static void rt_pa_sink_info_cb(pa_context * /*c*/, const pa_sink_info *i,
8535                                int eol, void * /*userdata*/)
8536 {
8537   if (eol) return;
8538   PaDeviceInfo inf;
8539   inf.info.name = pa_proplist_gets(i->proplist, "device.description");
8540   inf.info.probed = true;
8541   inf.info.outputChannels = i->sample_spec.channels;
8542   inf.info.preferredSampleRate = i->sample_spec.rate;
8543   inf.info.isDefaultOutput = (rt_pa_info.default_sink_name == i->name);
8544   inf.sink_index = i->index;
8545   inf.sink_name = i->name;
8546   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8547     inf.info.sampleRates.push_back( *sr );
8548   for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
8549         fm->rtaudio_format; ++fm )
8550     inf.info.nativeFormats |= fm->rtaudio_format;
8551   for (size_t i=0; i < rt_pa_info.dev.size(); i++)
8552   {
8553     /* Attempt to match up sink and source records by device description. */
8554     if (rt_pa_info.dev[i].info.name == inf.info.name) {
8555       rt_pa_info.dev[i].sink_index = inf.sink_index;
8556       rt_pa_info.dev[i].sink_name = inf.sink_name;
8557       rt_pa_info.dev[i].info.outputChannels = inf.info.outputChannels;
8558       rt_pa_info.dev[i].info.isDefaultOutput = inf.info.isDefaultOutput;
8559       /* Assume duplex channels are minimum of input and output channels. */
8560       /* Uncomment if we add support for DUPLEX
8561       if (rt_pa_info.dev[i].source_index > -1)
8562         (inf.info.outputChannels < rt_pa_info.dev[i].info.inputChannels)
8563           ? inf.info.outputChannels : rt_pa_info.dev[i].info.inputChannels;
8564       */
8565       return;
8566     }
8567   }
8568   /* try to ensure device #0 is the default */
8569   if (inf.info.isDefaultOutput)
8570     rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
8571   else
8572     rt_pa_info.dev.push_back(inf);
8573 }
8574 
8575 static void rt_pa_source_info_cb(pa_context * /*c*/, const pa_source_info *i,
8576                                  int eol, void * /*userdata*/)
8577 {
8578   if (eol) return;
8579   PaDeviceInfo inf;
8580   inf.info.name = pa_proplist_gets(i->proplist, "device.description");
8581   inf.info.probed = true;
8582   inf.info.inputChannels = i->sample_spec.channels;
8583   inf.info.preferredSampleRate = i->sample_spec.rate;
8584   inf.info.isDefaultInput = (rt_pa_info.default_source_name == i->name);
8585   inf.source_index = i->index;
8586   inf.source_name = i->name;
8587   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8588     inf.info.sampleRates.push_back( *sr );
8589   for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
8590         fm->rtaudio_format; ++fm )
8591     inf.info.nativeFormats |= fm->rtaudio_format;
8592 
8593   for (size_t i=0; i < rt_pa_info.dev.size(); i++)
8594   {
8595     /* Attempt to match up sink and source records by device description. */
8596     if (rt_pa_info.dev[i].info.name == inf.info.name) {
8597       rt_pa_info.dev[i].source_index = inf.source_index;
8598       rt_pa_info.dev[i].source_name = inf.source_name;
8599       rt_pa_info.dev[i].info.inputChannels = inf.info.inputChannels;
8600       rt_pa_info.dev[i].info.isDefaultInput = inf.info.isDefaultInput;
8601       /* Assume duplex channels are minimum of input and output channels. */
8602       /* Uncomment if we add support for DUPLEX
8603       if (rt_pa_info.dev[i].sink_index > -1) {
8604         rt_pa_info.dev[i].info.duplexChannels =
8605           (inf.info.inputChannels < rt_pa_info.dev[i].info.outputChannels)
8606           ? inf.info.inputChannels : rt_pa_info.dev[i].info.outputChannels;
8607       }
8608       */
8609       return;
8610     }
8611   }
8612   /* try to ensure device #0 is the default */
8613   if (inf.info.isDefaultInput)
8614     rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
8615   else
8616     rt_pa_info.dev.push_back(inf);
8617 }
8618 
8619 static void rt_pa_context_state_callback(pa_context *context, void *userdata) {
8620   (void)userdata;
8621 
8622   switch (pa_context_get_state(context)) {
8623     case PA_CONTEXT_CONNECTING:
8624     case PA_CONTEXT_AUTHORIZING:
8625     case PA_CONTEXT_SETTING_NAME:
8626       break;
8627 
8628     case PA_CONTEXT_READY:
8629       rt_pa_info.dev.clear();
8630       pa_context_get_server_info(context, rt_pa_server_callback, NULL);
8631       pa_context_get_sink_info_list(context, rt_pa_sink_info_cb, NULL);
8632       pa_context_get_source_info_list(context, rt_pa_source_info_cb, NULL);
8633       break;
8634 
8635     case PA_CONTEXT_TERMINATED:
8636       rt_pa_mainloop_api_quit(0);
8637       break;
8638 
8639     case PA_CONTEXT_FAILED:
8640     default:
8641       rt_pa_mainloop_api_quit(1);
8642   }
8643 }
8644 
8645 RtApiPulse::~RtApiPulse()
8646 {
8647   if ( stream_.state != STREAM_CLOSED )
8648     closeStream();
8649 }
8650 
void RtApiPulse::collectDeviceInfo( void )
{
  // (Re)build the global rt_pa_info device list by connecting to the
  // PulseAudio server and running a blocking mainloop; the context state
  // callback issues the actual server/sink/source queries.
  pa_context *context = NULL;
  pa_mainloop *m = NULL;
  char *server = NULL;  // NULL requests the default server
  int ret = 1;

  if (!(m = pa_mainloop_new())) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_mainloop_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Published globally so the callbacks can terminate this mainloop.
  rt_pa_mainloop_api = pa_mainloop_get_api(m);

  if (!(context = pa_context_new_with_proplist(rt_pa_mainloop_api, NULL, NULL))) {
    errorStream_ << "pa_context_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  pa_context_set_state_callback(context, rt_pa_context_state_callback, NULL);

  if (pa_context_connect(context, server, PA_CONTEXT_NOFLAGS, NULL) < 0) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_context_connect() failed: "
      << pa_strerror(pa_context_errno(context));
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Blocks until one of the callbacks calls rt_pa_mainloop_api_quit().
  if (pa_mainloop_run(m, &ret) < 0) {
    errorStream_ << "pa_mainloop_run() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

quit:
  // Shared cleanup path for both success and failure.
  if (context)
    pa_context_unref(context);

  if (m) {
    pa_mainloop_free(m);
  }

  pa_xfree(server);
}
8701 
8702 unsigned int RtApiPulse::getDeviceCount( void )
8703 {
8704   collectDeviceInfo();
8705   return rt_pa_info.dev.size();
8706 }
8707 
8708 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
8709 {
8710   if (rt_pa_info.dev.size()==0)
8711       collectDeviceInfo();
8712   if (device < rt_pa_info.dev.size())
8713     return rt_pa_info.dev[device].info;
8714   return RtAudio::DeviceInfo();
8715 }
8716 
8717 static void *pulseaudio_callback( void * user )
8718 {
8719   CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8720   RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8721   volatile bool *isRunning = &cbi->isRunning;
8722 
8723 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8724   if (cbi->doRealtime) {
8725     std::cerr << "RtAudio pulse: " <<
8726              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8727              "running realtime scheduling" << std::endl;
8728   }
8729 #endif
8730 
8731   while ( *isRunning ) {
8732     pthread_testcancel();
8733     context->callbackEvent();
8734   }
8735 
8736   pthread_exit( NULL );
8737 }
8738 
8739 void RtApiPulse::closeStream( void )
8740 {
8741   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8742 
8743   stream_.callbackInfo.isRunning = false;
8744   if ( pah ) {
8745     MUTEX_LOCK( &stream_.mutex );
8746     if ( stream_.state == STREAM_STOPPED ) {
8747       pah->runnable = true;
8748       pthread_cond_signal( &pah->runnable_cv );
8749     }
8750     MUTEX_UNLOCK( &stream_.mutex );
8751 
8752     pthread_join( pah->thread, 0 );
8753     if ( pah->s_play ) {
8754       pa_simple_flush( pah->s_play, NULL );
8755       pa_simple_free( pah->s_play );
8756     }
8757     if ( pah->s_rec )
8758       pa_simple_free( pah->s_rec );
8759 
8760     pthread_cond_destroy( &pah->runnable_cv );
8761     delete pah;
8762     stream_.apiHandle = 0;
8763   }
8764 
8765   if ( stream_.userBuffer[0] ) {
8766     free( stream_.userBuffer[0] );
8767     stream_.userBuffer[0] = 0;
8768   }
8769   if ( stream_.userBuffer[1] ) {
8770     free( stream_.userBuffer[1] );
8771     stream_.userBuffer[1] = 0;
8772   }
8773 
8774   stream_.state = STREAM_CLOSED;
8775   stream_.mode = UNINITIALIZED;
8776 }
8777 
// Runs one cycle of the stream's callback thread: invoke the user
// callback, then move one buffer of audio to/from the PulseAudio simple
// streams, converting formats where needed.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // While the stream is stopped, park on the condition variable until
  // startStream() (or closeStream()) marks the handle runnable.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    // Woken up but not running (e.g. stream being closed): bail out.
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Run the user's callback.  Its return value controls stream shutdown:
  // 2 -> abort immediately (below), 1 -> drain and stop (end of function).
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Hand PulseAudio the conversion buffer when a format/channel/interleave
  // conversion is required, the user buffer otherwise.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Convert user data to the device format (if needed), then write it.
    if ( stream_.doConvertBuffer[OUTPUT] ) {
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );
        bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
        bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.userFormat );

    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    // Read device data, then convert it to the user format (if needed).
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // A return value of 1 from the user callback requests a graceful stop.
  if ( doStopStream == 1 )
    stopStream();
}
8869 
8870 void RtApiPulse::startStream( void )
8871 {
8872   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8873 
8874   if ( stream_.state == STREAM_CLOSED ) {
8875     errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8876     error( RtAudioError::INVALID_USE );
8877     return;
8878   }
8879   if ( stream_.state == STREAM_RUNNING ) {
8880     errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8881     error( RtAudioError::WARNING );
8882     return;
8883   }
8884 
8885   MUTEX_LOCK( &stream_.mutex );
8886 
8887   #if defined( HAVE_GETTIMEOFDAY )
8888   gettimeofday( &stream_.lastTickTimestamp, NULL );
8889   #endif
8890 
8891   stream_.state = STREAM_RUNNING;
8892 
8893   pah->runnable = true;
8894   pthread_cond_signal( &pah->runnable_cv );
8895   MUTEX_UNLOCK( &stream_.mutex );
8896 }
8897 
// Stops a running stream, first letting PulseAudio drain any audio still
// queued on the playback stream (contrast with abortStream(), which
// discards it).
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the lock so the callback thread sees
  // STOPPED and parks itself on the condition variable.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Ask PulseAudio to play out everything still queued.
      if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        // Release the lock before raising the error to the client.
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8934 
8935 void RtApiPulse::abortStream( void )
8936 {
8937   PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8938 
8939   if ( stream_.state == STREAM_CLOSED ) {
8940     errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8941     error( RtAudioError::INVALID_USE );
8942     return;
8943   }
8944   if ( stream_.state == STREAM_STOPPED ) {
8945     errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8946     error( RtAudioError::WARNING );
8947     return;
8948   }
8949 
8950   stream_.state = STREAM_STOPPED;
8951   MUTEX_LOCK( &stream_.mutex );
8952 
8953   if ( pah ) {
8954     pah->runnable = false;
8955     if ( pah->s_play ) {
8956       int pa_error;
8957       if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8958         errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8959           pa_strerror( pa_error ) << ".";
8960         errorText_ = errorStream_.str();
8961         MUTEX_UNLOCK( &stream_.mutex );
8962         error( RtAudioError::SYSTEM_ERROR );
8963         return;
8964       }
8965     }
8966   }
8967 
8968   stream_.state = STREAM_STOPPED;
8969   MUTEX_UNLOCK( &stream_.mutex );
8970 }
8971 
// Opens one direction (INPUT or OUTPUT) of a PulseAudio stream.
// Validates the request against the cached device info, sets up sample
// format / conversion state, allocates buffers, connects a pa_simple
// stream, and (on first open) spawns the callback thread.  Returns
// SUCCESS or FAILURE; on failure all partially-allocated resources are
// released via the `error:` label.  DUPLEX is rejected (Simple API
// limitation — see the note in the switch below).
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
{
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  if ( device >= rt_pa_info.dev.size() ) return false;
  if ( firstChannel != 0 ) {
    errorText_ = "PulseAudio does not support channel offset mapping.";
    return false;
  }

  /* these may be NULL for default, but we've already got the names */
  const char *dev_input = NULL;
  const char *dev_output = NULL;
  if (!rt_pa_info.dev[device].source_name.empty())
    dev_input = rt_pa_info.dev[device].source_name.c_str();
  if (!rt_pa_info.dev[device].sink_name.empty())
    dev_output = rt_pa_info.dev[device].sink_name.c_str();

  // Direction support checks against the cached device capabilities.
  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels == 0) {
    errorText_ = "PulseAudio device does not support input.";
    return false;
  }
  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels == 0) {
    errorText_ = "PulseAudio device does not support output.";
    return false;
  }
  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels == 0) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio device does not support duplex.";
    return false;
  }

  // Channel-count checks per direction.
  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of input channels.";
    return false;
  }

  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of output channels.";
    return false;
  }

  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels < channels) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio: unsupported number of duplex channels.";
    return false;
  }

  ss.channels = channels;

  // The requested rate must appear in the static supported-rate table.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      sr_found = true;
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
      break;
    }
  }
  if ( !sr_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
    return false;
  }

  // Map the RtAudio sample format to a native PulseAudio format.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      sf_found = true;
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
      break;
    }
  }
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  }

  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = options ? options->numberOfBuffers : 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // An existing output-side device buffer can be reused if it is
      // already large enough for the input side.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Create the API handle on the first open (shared across directions).
  if ( !stream_.apiHandle ) {
    PulseAudioHandle *pah = new PulseAudioHandle;
    // NOTE(review): operator new throws on failure, so this NULL check
    // can never fire — TODO confirm whether a nothrow new was intended.
    if ( !pah ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
      goto error;
    }

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
      goto error;
    }
  }
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  int error;
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
  switch ( mode ) {
    // buffer_attr is declared here (before the case labels) so it is in
    // scope for both the INPUT and OUTPUT branches.
    pa_buffer_attr buffer_attr;
  case INPUT:
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;

    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD,
                                dev_input, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
      goto error;
    }
    break;
  case OUTPUT: {
    pa_buffer_attr * attr_ptr;

    if ( options && options->numberOfBuffers > 0 ) {
      // pa_buffer_attr::fragsize is recording-only.
      // Hopefully PulseAudio won't access uninitialized fields.
      buffer_attr.maxlength = bufferBytes * options->numberOfBuffers;
      buffer_attr.minreq = -1;
      buffer_attr.prebuf = -1;
      buffer_attr.tlength = -1;
      attr_ptr = &buffer_attr;
    } else {
      attr_ptr = nullptr;
    }

    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK,
                                 dev_output, "Playback", &ss, NULL, attr_ptr, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
      goto error;
    }
    break;
  }
  case DUPLEX:
    /* Note: We could add DUPLEX by synchronizing multiple streams,
       but it would mean moving from Simple API to Asynchronous API:
       https://freedesktop.org/software/pulseaudio/doxygen/streams.html#sync_streams */
    errorText_ = "RtApiPulse::probeDeviceOpen: duplex not supported for PulseAudio.";
    goto error;
  default:
    goto error;
  }

  // Record the overall stream mode: opening the second direction of a
  // pair promotes the stream to DUPLEX; re-opening the same direction
  // is an error.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    goto error;
  else
    stream_.mode = DUPLEX;

  // Spawn the callback thread on the first successful open.
  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;

    stream_.state = STREAM_STOPPED;
    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional).  The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;

      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      pthread_attr_setschedparam(&attr, &param);
    }
    else
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#else
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#endif

    stream_.callbackInfo.isRunning = true;
    int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
    pthread_attr_destroy(&attr);
    if(result != 0) {
      // Failed. Try instead with default attributes.
      result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
      if(result != 0) {
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
        goto error;
      }
    }
  }

  return SUCCESS;

 error:
  // Common failure path: release the handle and any buffers allocated
  // above, then mark the stream closed.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
9259 
9260 //******************** End of __LINUX_PULSE__ *********************//
9261 #endif
9262 
9263 #if defined(__LINUX_OSS__)
9264 
9265 #include <unistd.h>
9266 #include <sys/ioctl.h>
9267 #include <unistd.h>
9268 #include <fcntl.h>
9269 #include <sys/soundcard.h>
9270 #include <errno.h>
9271 #include <math.h>
9272 
9273 static void *ossCallbackHandler(void * ptr);
9274 
9275 // A structure to hold various information related to the OSS API
9276 // implementation.
// Per-stream bookkeeping for the OSS backend.
struct OssHandle {
  int id[2];               // device ids, one per direction
  bool xrun[2];            // per-direction xrun flags
  bool triggered;          // set once the device has been triggered
  pthread_cond_t runnable; // wakes the callback thread

  OssHandle() : triggered( false )
  {
    id[0] = id[1] = 0;
    xrun[0] = xrun[1] = false;
  }
};
9286 
9287 RtApiOss :: RtApiOss()
9288 {
9289   // Nothing to do here.
9290 }
9291 
9292 RtApiOss :: ~RtApiOss()
9293 {
9294   if ( stream_.state != STREAM_CLOSED ) closeStream();
9295 }
9296 
9297 unsigned int RtApiOss :: getDeviceCount( void )
9298 {
9299   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9300   if ( mixerfd == -1 ) {
9301     errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9302     error( RtAudioError::WARNING );
9303     return 0;
9304   }
9305 
9306   oss_sysinfo sysinfo;
9307   if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9308     close( mixerfd );
9309     errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9310     error( RtAudioError::WARNING );
9311     return 0;
9312   }
9313 
9314   close( mixerfd );
9315   return sysinfo.numaudios;
9316 }
9317 
9318 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9319 {
9320   RtAudio::DeviceInfo info;
9321   info.probed = false;
9322 
9323   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9324   if ( mixerfd == -1 ) {
9325     errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9326     error( RtAudioError::WARNING );
9327     return info;
9328   }
9329 
9330   oss_sysinfo sysinfo;
9331   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9332   if ( result == -1 ) {
9333     close( mixerfd );
9334     errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9335     error( RtAudioError::WARNING );
9336     return info;
9337   }
9338 
9339   unsigned nDevices = sysinfo.numaudios;
9340   if ( nDevices == 0 ) {
9341     close( mixerfd );
9342     errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9343     error( RtAudioError::INVALID_USE );
9344     return info;
9345   }
9346 
9347   if ( device >= nDevices ) {
9348     close( mixerfd );
9349     errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9350     error( RtAudioError::INVALID_USE );
9351     return info;
9352   }
9353 
9354   oss_audioinfo ainfo;
9355   ainfo.dev = device;
9356   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9357   close( mixerfd );
9358   if ( result == -1 ) {
9359     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9360     errorText_ = errorStream_.str();
9361     error( RtAudioError::WARNING );
9362     return info;
9363   }
9364 
9365   // Probe channels
9366   if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9367   if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9368   if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9369     if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9370       info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9371   }
9372 
9373   // Probe data formats ... do for input
9374   unsigned long mask = ainfo.iformats;
9375   if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9376     info.nativeFormats |= RTAUDIO_SINT16;
9377   if ( mask & AFMT_S8 )
9378     info.nativeFormats |= RTAUDIO_SINT8;
9379   if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9380     info.nativeFormats |= RTAUDIO_SINT32;
9381 #ifdef AFMT_FLOAT
9382   if ( mask & AFMT_FLOAT )
9383     info.nativeFormats |= RTAUDIO_FLOAT32;
9384 #endif
9385   if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9386     info.nativeFormats |= RTAUDIO_SINT24;
9387 
9388   // Check that we have at least one supported format
9389   if ( info.nativeFormats == 0 ) {
9390     errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9391     errorText_ = errorStream_.str();
9392     error( RtAudioError::WARNING );
9393     return info;
9394   }
9395 
9396   // Probe the supported sample rates.
9397   info.sampleRates.clear();
9398   if ( ainfo.nrates ) {
9399     for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9400       for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9401         if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9402           info.sampleRates.push_back( SAMPLE_RATES[k] );
9403 
9404           if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9405             info.preferredSampleRate = SAMPLE_RATES[k];
9406 
9407           break;
9408         }
9409       }
9410     }
9411   }
9412   else {
9413     // Check min and max rate values;
9414     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9415       if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9416         info.sampleRates.push_back( SAMPLE_RATES[k] );
9417 
9418         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9419           info.preferredSampleRate = SAMPLE_RATES[k];
9420       }
9421     }
9422   }
9423 
9424   if ( info.sampleRates.size() == 0 ) {
9425     errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9426     errorText_ = errorStream_.str();
9427     error( RtAudioError::WARNING );
9428   }
9429   else {
9430     info.probed = true;
9431     info.name = ainfo.name;
9432   }
9433 
9434   return info;
9435 }
9436 
9437 
9438 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9439                                   unsigned int firstChannel, unsigned int sampleRate,
9440                                   RtAudioFormat format, unsigned int *bufferSize,
9441                                   RtAudio::StreamOptions *options )
9442 {
9443   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9444   if ( mixerfd == -1 ) {
9445     errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9446     return FAILURE;
9447   }
9448 
9449   oss_sysinfo sysinfo;
9450   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9451   if ( result == -1 ) {
9452     close( mixerfd );
9453     errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9454     return FAILURE;
9455   }
9456 
9457   unsigned nDevices = sysinfo.numaudios;
9458   if ( nDevices == 0 ) {
9459     // This should not happen because a check is made before this function is called.
9460     close( mixerfd );
9461     errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9462     return FAILURE;
9463   }
9464 
9465   if ( device >= nDevices ) {
9466     // This should not happen because a check is made before this function is called.
9467     close( mixerfd );
9468     errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9469     return FAILURE;
9470   }
9471 
9472   oss_audioinfo ainfo;
9473   ainfo.dev = device;
9474   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9475   close( mixerfd );
9476   if ( result == -1 ) {
9477     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9478     errorText_ = errorStream_.str();
9479     return FAILURE;
9480   }
9481 
9482   // Check if device supports input or output
9483   if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9484        ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9485     if ( mode == OUTPUT )
9486       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9487     else
9488       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9489     errorText_ = errorStream_.str();
9490     return FAILURE;
9491   }
9492 
9493   int flags = 0;
9494   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9495   if ( mode == OUTPUT )
9496     flags |= O_WRONLY;
9497   else { // mode == INPUT
9498     if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9499       // We just set the same device for playback ... close and reopen for duplex (OSS only).
9500       close( handle->id[0] );
9501       handle->id[0] = 0;
9502       if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9503         errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9504         errorText_ = errorStream_.str();
9505         return FAILURE;
9506       }
9507       // Check that the number previously set channels is the same.
9508       if ( stream_.nUserChannels[0] != channels ) {
9509         errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9510         errorText_ = errorStream_.str();
9511         return FAILURE;
9512       }
9513       flags |= O_RDWR;
9514     }
9515     else
9516       flags |= O_RDONLY;
9517   }
9518 
9519   // Set exclusive access if specified.
9520   if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9521 
9522   // Try to open the device.
9523   int fd;
9524   fd = open( ainfo.devnode, flags, 0 );
9525   if ( fd == -1 ) {
9526     if ( errno == EBUSY )
9527       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9528     else
9529       errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9530     errorText_ = errorStream_.str();
9531     return FAILURE;
9532   }
9533 
9534   // For duplex operation, specifically set this mode (this doesn't seem to work).
9535   /*
9536     if ( flags | O_RDWR ) {
9537     result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9538     if ( result == -1) {
9539     errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9540     errorText_ = errorStream_.str();
9541     return FAILURE;
9542     }
9543     }
9544   */
9545 
9546   // Check the device channel support.
9547   stream_.nUserChannels[mode] = channels;
9548   if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9549     close( fd );
9550     errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9551     errorText_ = errorStream_.str();
9552     return FAILURE;
9553   }
9554 
9555   // Set the number of channels.
9556   int deviceChannels = channels + firstChannel;
9557   result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9558   if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9559     close( fd );
9560     errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9561     errorText_ = errorStream_.str();
9562     return FAILURE;
9563   }
9564   stream_.nDeviceChannels[mode] = deviceChannels;
9565 
9566   // Get the data format mask
9567   int mask;
9568   result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9569   if ( result == -1 ) {
9570     close( fd );
9571     errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9572     errorText_ = errorStream_.str();
9573     return FAILURE;
9574   }
9575 
9576   // Determine how to set the device format.
9577   stream_.userFormat = format;
9578   int deviceFormat = -1;
9579   stream_.doByteSwap[mode] = false;
9580   if ( format == RTAUDIO_SINT8 ) {
9581     if ( mask & AFMT_S8 ) {
9582       deviceFormat = AFMT_S8;
9583       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9584     }
9585   }
9586   else if ( format == RTAUDIO_SINT16 ) {
9587     if ( mask & AFMT_S16_NE ) {
9588       deviceFormat = AFMT_S16_NE;
9589       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9590     }
9591     else if ( mask & AFMT_S16_OE ) {
9592       deviceFormat = AFMT_S16_OE;
9593       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9594       stream_.doByteSwap[mode] = true;
9595     }
9596   }
9597   else if ( format == RTAUDIO_SINT24 ) {
9598     if ( mask & AFMT_S24_NE ) {
9599       deviceFormat = AFMT_S24_NE;
9600       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9601     }
9602     else if ( mask & AFMT_S24_OE ) {
9603       deviceFormat = AFMT_S24_OE;
9604       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9605       stream_.doByteSwap[mode] = true;
9606     }
9607   }
9608   else if ( format == RTAUDIO_SINT32 ) {
9609     if ( mask & AFMT_S32_NE ) {
9610       deviceFormat = AFMT_S32_NE;
9611       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9612     }
9613     else if ( mask & AFMT_S32_OE ) {
9614       deviceFormat = AFMT_S32_OE;
9615       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9616       stream_.doByteSwap[mode] = true;
9617     }
9618   }
9619 
9620   if ( deviceFormat == -1 ) {
9621     // The user requested format is not natively supported by the device.
9622     if ( mask & AFMT_S16_NE ) {
9623       deviceFormat = AFMT_S16_NE;
9624       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9625     }
9626     else if ( mask & AFMT_S32_NE ) {
9627       deviceFormat = AFMT_S32_NE;
9628       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9629     }
9630     else if ( mask & AFMT_S24_NE ) {
9631       deviceFormat = AFMT_S24_NE;
9632       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9633     }
9634     else if ( mask & AFMT_S16_OE ) {
9635       deviceFormat = AFMT_S16_OE;
9636       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9637       stream_.doByteSwap[mode] = true;
9638     }
9639     else if ( mask & AFMT_S32_OE ) {
9640       deviceFormat = AFMT_S32_OE;
9641       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9642       stream_.doByteSwap[mode] = true;
9643     }
9644     else if ( mask & AFMT_S24_OE ) {
9645       deviceFormat = AFMT_S24_OE;
9646       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9647       stream_.doByteSwap[mode] = true;
9648     }
9649     else if ( mask & AFMT_S8) {
9650       deviceFormat = AFMT_S8;
9651       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9652     }
9653   }
9654 
9655   if ( stream_.deviceFormat[mode] == 0 ) {
9656     // This really shouldn't happen ...
9657     close( fd );
9658     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9659     errorText_ = errorStream_.str();
9660     return FAILURE;
9661   }
9662 
9663   // Set the data format.
9664   int temp = deviceFormat;
9665   result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9666   if ( result == -1 || deviceFormat != temp ) {
9667     close( fd );
9668     errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9669     errorText_ = errorStream_.str();
9670     return FAILURE;
9671   }
9672 
9673   // Attempt to set the buffer size.  According to OSS, the minimum
9674   // number of buffers is two.  The supposed minimum buffer size is 16
9675   // bytes, so that will be our lower bound.  The argument to this
9676   // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9677   // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9678   // We'll check the actual value used near the end of the setup
9679   // procedure.
9680   int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9681   if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9682   int buffers = 0;
9683   if ( options ) buffers = options->numberOfBuffers;
9684   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9685   if ( buffers < 2 ) buffers = 3;
9686   temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9687   result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9688   if ( result == -1 ) {
9689     close( fd );
9690     errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9691     errorText_ = errorStream_.str();
9692     return FAILURE;
9693   }
9694   stream_.nBuffers = buffers;
9695 
9696   // Save buffer size (in sample frames).
9697   *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9698   stream_.bufferSize = *bufferSize;
9699 
9700   // Set the sample rate.
9701   int srate = sampleRate;
9702   result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9703   if ( result == -1 ) {
9704     close( fd );
9705     errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9706     errorText_ = errorStream_.str();
9707     return FAILURE;
9708   }
9709 
9710   // Verify the sample rate setup worked.
9711   if ( abs( srate - (int)sampleRate ) > 100 ) {
9712     close( fd );
9713     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9714     errorText_ = errorStream_.str();
9715     return FAILURE;
9716   }
9717   stream_.sampleRate = sampleRate;
9718 
9719   if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9720     // We're doing duplex setup here.
9721     stream_.deviceFormat[0] = stream_.deviceFormat[1];
9722     stream_.nDeviceChannels[0] = deviceChannels;
9723   }
9724 
9725   // Set interleaving parameters.
9726   stream_.userInterleaved = true;
9727   stream_.deviceInterleaved[mode] =  true;
9728   if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9729     stream_.userInterleaved = false;
9730 
9731   // Set flags for buffer conversion
9732   stream_.doConvertBuffer[mode] = false;
9733   if ( stream_.userFormat != stream_.deviceFormat[mode] )
9734     stream_.doConvertBuffer[mode] = true;
9735   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9736     stream_.doConvertBuffer[mode] = true;
9737   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9738        stream_.nUserChannels[mode] > 1 )
9739     stream_.doConvertBuffer[mode] = true;
9740 
9741   // Allocate the stream handles if necessary and then save.
9742   if ( stream_.apiHandle == 0 ) {
9743     try {
9744       handle = new OssHandle;
9745     }
9746     catch ( std::bad_alloc& ) {
9747       errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9748       goto error;
9749     }
9750 
9751     if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9752       errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9753       goto error;
9754     }
9755 
9756     stream_.apiHandle = (void *) handle;
9757   }
9758   else {
9759     handle = (OssHandle *) stream_.apiHandle;
9760   }
9761   handle->id[mode] = fd;
9762 
9763   // Allocate necessary internal buffers.
9764   unsigned long bufferBytes;
9765   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9766   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9767   if ( stream_.userBuffer[mode] == NULL ) {
9768     errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9769     goto error;
9770   }
9771 
9772   if ( stream_.doConvertBuffer[mode] ) {
9773 
9774     bool makeBuffer = true;
9775     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9776     if ( mode == INPUT ) {
9777       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9778         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9779         if ( bufferBytes <= bytesOut ) makeBuffer = false;
9780       }
9781     }
9782 
9783     if ( makeBuffer ) {
9784       bufferBytes *= *bufferSize;
9785       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9786       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9787       if ( stream_.deviceBuffer == NULL ) {
9788         errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9789         goto error;
9790       }
9791     }
9792   }
9793 
9794   stream_.device[mode] = device;
9795   stream_.state = STREAM_STOPPED;
9796 
9797   // Setup the buffer conversion information structure.
9798   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9799 
9800   // Setup thread if necessary.
9801   if ( stream_.mode == OUTPUT && mode == INPUT ) {
9802     // We had already set up an output stream.
9803     stream_.mode = DUPLEX;
9804     if ( stream_.device[0] == device ) handle->id[0] = fd;
9805   }
9806   else {
9807     stream_.mode = mode;
9808 
9809     // Setup callback thread.
9810     stream_.callbackInfo.object = (void *) this;
9811 
9812     // Set the thread attributes for joinable and realtime scheduling
9813     // priority.  The higher priority will only take affect if the
9814     // program is run as root or suid.
9815     pthread_attr_t attr;
9816     pthread_attr_init( &attr );
9817     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9818 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9819     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9820       stream_.callbackInfo.doRealtime = true;
9821       struct sched_param param;
9822       int priority = options->priority;
9823       int min = sched_get_priority_min( SCHED_RR );
9824       int max = sched_get_priority_max( SCHED_RR );
9825       if ( priority < min ) priority = min;
9826       else if ( priority > max ) priority = max;
9827       param.sched_priority = priority;
9828 
9829       // Set the policy BEFORE the priority. Otherwise it fails.
9830       pthread_attr_setschedpolicy(&attr, SCHED_RR);
9831       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9832       // This is definitely required. Otherwise it fails.
9833       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9834       pthread_attr_setschedparam(&attr, &param);
9835     }
9836     else
9837       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9838 #else
9839     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9840 #endif
9841 
9842     stream_.callbackInfo.isRunning = true;
9843     result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9844     pthread_attr_destroy( &attr );
9845     if ( result ) {
9846       // Failed. Try instead with default attributes.
9847       result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9848       if ( result ) {
9849         stream_.callbackInfo.isRunning = false;
9850         errorText_ = "RtApiOss::error creating callback thread!";
9851         goto error;
9852       }
9853     }
9854   }
9855 
9856   return SUCCESS;
9857 
9858  error:
9859   if ( handle ) {
9860     pthread_cond_destroy( &handle->runnable );
9861     if ( handle->id[0] ) close( handle->id[0] );
9862     if ( handle->id[1] ) close( handle->id[1] );
9863     delete handle;
9864     stream_.apiHandle = 0;
9865   }
9866 
9867   for ( int i=0; i<2; i++ ) {
9868     if ( stream_.userBuffer[i] ) {
9869       free( stream_.userBuffer[i] );
9870       stream_.userBuffer[i] = 0;
9871     }
9872   }
9873 
9874   if ( stream_.deviceBuffer ) {
9875     free( stream_.deviceBuffer );
9876     stream_.deviceBuffer = 0;
9877   }
9878 
9879   stream_.state = STREAM_CLOSED;
9880   return FAILURE;
9881 }
9882 
void RtApiOss :: closeStream()
{
  // Shut down the callback thread, halt any active i/o, close the
  // device descriptor(s) and free all stream buffers.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false; // ask the callback thread to exit its loop
  MUTEX_LOCK( &stream_.mutex );
  // If the thread is parked on the condition variable (stream stopped),
  // wake it so it can observe isRunning == false and terminate.
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // Halt the hardware if the stream was still running when we got here.
  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] ); // playback descriptor
    if ( handle->id[1] ) close( handle->id[1] ); // capture descriptor
    delete handle;
    stream_.apiHandle = 0;
  }

  // Release the per-direction user buffers and the shared device buffer.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9930 
void RtApiOss :: startStream()
{
  // Move an open, stopped stream into the RUNNING state and wake the
  // callback thread.  OSS itself needs no explicit start command: the
  // device begins playing/recording as soon as it is fed samples.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  // State change happens under the mutex so the callback thread observes
  // it consistently.
  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  // Wake the callback thread, which blocks on this condition variable
  // while the stream is stopped (see callbackEvent).
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );
}
9956 
void RtApiOss :: stopStream()
{
  // Gracefully stop a running stream: overwrite any queued output with
  // silence so the device does not pop or replay the last fragment, then
  // halt i/o on both directions.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Use whichever buffer is actually written to the device: the
    // converted device buffer, or the raw user buffer.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // One buffer of silence per device fragment, plus one, so all queued
    // audio is overwritten before the halt.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false; // duplex devices must be re-triggered on restart
  }

  // Halt the input side when it has its own file descriptor (pure input,
  // or duplex across two separate devices).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  // errorText_ was set above whenever result is -1 at this point.
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
10028 
10029 void RtApiOss :: abortStream()
10030 {
10031   verifyStream();
10032   if ( stream_.state == STREAM_STOPPED ) {
10033     errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
10034     error( RtAudioError::WARNING );
10035     return;
10036   }
10037 
10038   MUTEX_LOCK( &stream_.mutex );
10039 
10040   // The state might change while waiting on a mutex.
10041   if ( stream_.state == STREAM_STOPPED ) {
10042     MUTEX_UNLOCK( &stream_.mutex );
10043     return;
10044   }
10045 
10046   int result = 0;
10047   OssHandle *handle = (OssHandle *) stream_.apiHandle;
10048   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
10049     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
10050     if ( result == -1 ) {
10051       errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
10052       errorText_ = errorStream_.str();
10053       goto unlock;
10054     }
10055     handle->triggered = false;
10056   }
10057 
10058   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
10059     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
10060     if ( result == -1 ) {
10061       errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
10062       errorText_ = errorStream_.str();
10063       goto unlock;
10064     }
10065   }
10066 
10067  unlock:
10068   stream_.state = STREAM_STOPPED;
10069   MUTEX_UNLOCK( &stream_.mutex );
10070 
10071   if ( result != -1 ) return;
10072   error( RtAudioError::SYSTEM_ERROR );
10073 }
10074 
10075 void RtApiOss :: callbackEvent()
10076 {
10077   OssHandle *handle = (OssHandle *) stream_.apiHandle;
10078   if ( stream_.state == STREAM_STOPPED ) {
10079     MUTEX_LOCK( &stream_.mutex );
10080     pthread_cond_wait( &handle->runnable, &stream_.mutex );
10081     if ( stream_.state != STREAM_RUNNING ) {
10082       MUTEX_UNLOCK( &stream_.mutex );
10083       return;
10084     }
10085     MUTEX_UNLOCK( &stream_.mutex );
10086   }
10087 
10088   if ( stream_.state == STREAM_CLOSED ) {
10089     errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
10090     error( RtAudioError::WARNING );
10091     return;
10092   }
10093 
10094   // Invoke user callback to get fresh output data.
10095   int doStopStream = 0;
10096   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
10097   double streamTime = getStreamTime();
10098   RtAudioStreamStatus status = 0;
10099   if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
10100     status |= RTAUDIO_OUTPUT_UNDERFLOW;
10101     handle->xrun[0] = false;
10102   }
10103   if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
10104     status |= RTAUDIO_INPUT_OVERFLOW;
10105     handle->xrun[1] = false;
10106   }
10107   doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
10108                            stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
10109   if ( doStopStream == 2 ) {
10110     this->abortStream();
10111     return;
10112   }
10113 
10114   MUTEX_LOCK( &stream_.mutex );
10115 
10116   // The state might change while waiting on a mutex.
10117   if ( stream_.state == STREAM_STOPPED ) goto unlock;
10118 
10119   int result;
10120   char *buffer;
10121   int samples;
10122   RtAudioFormat format;
10123 
10124   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
10125 
10126     // Setup parameters and do buffer conversion if necessary.
10127     if ( stream_.doConvertBuffer[0] ) {
10128       buffer = stream_.deviceBuffer;
10129       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
10130       samples = stream_.bufferSize * stream_.nDeviceChannels[0];
10131       format = stream_.deviceFormat[0];
10132     }
10133     else {
10134       buffer = stream_.userBuffer[0];
10135       samples = stream_.bufferSize * stream_.nUserChannels[0];
10136       format = stream_.userFormat;
10137     }
10138 
10139     // Do byte swapping if necessary.
10140     if ( stream_.doByteSwap[0] )
10141       byteSwapBuffer( buffer, samples, format );
10142 
10143     if ( stream_.mode == DUPLEX && handle->triggered == false ) {
10144       int trig = 0;
10145       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
10146       result = write( handle->id[0], buffer, samples * formatBytes(format) );
10147       trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
10148       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
10149       handle->triggered = true;
10150     }
10151     else
10152       // Write samples to device.
10153       result = write( handle->id[0], buffer, samples * formatBytes(format) );
10154 
10155     if ( result == -1 ) {
10156       // We'll assume this is an underrun, though there isn't a
10157       // specific means for determining that.
10158       handle->xrun[0] = true;
10159       errorText_ = "RtApiOss::callbackEvent: audio write error.";
10160       error( RtAudioError::WARNING );
10161       // Continue on to input section.
10162     }
10163   }
10164 
10165   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
10166 
10167     // Setup parameters.
10168     if ( stream_.doConvertBuffer[1] ) {
10169       buffer = stream_.deviceBuffer;
10170       samples = stream_.bufferSize * stream_.nDeviceChannels[1];
10171       format = stream_.deviceFormat[1];
10172     }
10173     else {
10174       buffer = stream_.userBuffer[1];
10175       samples = stream_.bufferSize * stream_.nUserChannels[1];
10176       format = stream_.userFormat;
10177     }
10178 
10179     // Read samples from device.
10180     result = read( handle->id[1], buffer, samples * formatBytes(format) );
10181 
10182     if ( result == -1 ) {
10183       // We'll assume this is an overrun, though there isn't a
10184       // specific means for determining that.
10185       handle->xrun[1] = true;
10186       errorText_ = "RtApiOss::callbackEvent: audio read error.";
10187       error( RtAudioError::WARNING );
10188       goto unlock;
10189     }
10190 
10191     // Do byte swapping if necessary.
10192     if ( stream_.doByteSwap[1] )
10193       byteSwapBuffer( buffer, samples, format );
10194 
10195     // Do buffer conversion if necessary.
10196     if ( stream_.doConvertBuffer[1] )
10197       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
10198   }
10199 
10200  unlock:
10201   MUTEX_UNLOCK( &stream_.mutex );
10202 
10203   RtApi::tickStreamTime();
10204   if ( doStopStream == 1 ) this->stopStream();
10205 }
10206 
10207 static void *ossCallbackHandler( void *ptr )
10208 {
10209   CallbackInfo *info = (CallbackInfo *) ptr;
10210   RtApiOss *object = (RtApiOss *) info->object;
10211   bool *isRunning = &info->isRunning;
10212 
10213 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
10214   if (info->doRealtime) {
10215     std::cerr << "RtAudio oss: " <<
10216              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
10217              "running realtime scheduling" << std::endl;
10218   }
10219 #endif
10220 
10221   while ( *isRunning == true ) {
10222     pthread_testcancel();
10223     object->callbackEvent();
10224   }
10225 
10226   pthread_exit( NULL );
10227 }
10228 
10229 //******************** End of __LINUX_OSS__ *********************//
10230 #endif
10231 
10232 
10233 // *************************************************** //
10234 //
10235 // Protected common (OS-independent) RtAudio methods.
10236 //
10237 // *************************************************** //
10238 
10239 // This method can be modified to control the behavior of error
10240 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatcher: route the message in errorText_ through the
  // user-supplied error callback when one is installed; otherwise print
  // warnings to stderr and throw for anything more severe.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.
    // firstErrorOccurred_ acts as a reentrancy guard: while the original
    // error is being handled, nested calls to error() return immediately.

    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now -- abortStream() below may overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
10270 
10271 void RtApi :: verifyStream()
10272 {
10273   if ( stream_.state == STREAM_CLOSED ) {
10274     errorText_ = "RtApi:: a stream is not open!";
10275     error( RtAudioError::INVALID_USE );
10276   }
10277 }
10278 
10279 void RtApi :: clearStreamInfo()
10280 {
10281   stream_.mode = UNINITIALIZED;
10282   stream_.state = STREAM_CLOSED;
10283   stream_.sampleRate = 0;
10284   stream_.bufferSize = 0;
10285   stream_.nBuffers = 0;
10286   stream_.userFormat = 0;
10287   stream_.userInterleaved = true;
10288   stream_.streamTime = 0.0;
10289   stream_.apiHandle = 0;
10290   stream_.deviceBuffer = 0;
10291   stream_.callbackInfo.callback = 0;
10292   stream_.callbackInfo.userData = 0;
10293   stream_.callbackInfo.isRunning = false;
10294   stream_.callbackInfo.errorCallback = 0;
10295   for ( int i=0; i<2; i++ ) {
10296     stream_.device[i] = 11111;
10297     stream_.doConvertBuffer[i] = false;
10298     stream_.deviceInterleaved[i] = true;
10299     stream_.doByteSwap[i] = false;
10300     stream_.nUserChannels[i] = 0;
10301     stream_.nDeviceChannels[i] = 0;
10302     stream_.channelOffset[i] = 0;
10303     stream_.deviceFormat[i] = 0;
10304     stream_.latency[i] = 0;
10305     stream_.userBuffer[i] = 0;
10306     stream_.convertInfo[i].channels = 0;
10307     stream_.convertInfo[i].inJump = 0;
10308     stream_.convertInfo[i].outJump = 0;
10309     stream_.convertInfo[i].inFormat = 0;
10310     stream_.convertInfo[i].outFormat = 0;
10311     stream_.convertInfo[i].inOffset.clear();
10312     stream_.convertInfo[i].outOffset.clear();
10313   }
10314 }
10315 
10316 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10317 {
10318   if ( format == RTAUDIO_SINT16 )
10319     return 2;
10320   else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10321     return 4;
10322   else if ( format == RTAUDIO_FLOAT64 )
10323     return 8;
10324   else if ( format == RTAUDIO_SINT24 )
10325     return 3;
10326   else if ( format == RTAUDIO_SINT8 )
10327     return 1;
10328 
10329   errorText_ = "RtApi::formatBytes: undefined format.";
10330   error( RtAudioError::WARNING );
10331 
10332   return 0;
10333 }
10334 
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Fill in stream_.convertInfo[mode] with the jump sizes, formats and
  // per-channel sample offsets needed to convert between the user buffer
  // and the device buffer for the given stream direction.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have in common.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source -> interleaved destination: each source
      // channel is a contiguous bufferSize-sample block (jump of one
      // within a block), written to every Nth interleaved output sample.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source -> non-interleaved destination: the mirror
      // image of the case above.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides non-interleaved: channel blocks map one-to-one.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      // Interleaved device data: shift by firstChannel samples within
      // each frame (applied to the device-facing side of the transfer).
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      // Non-interleaved device data: shift by whole channel blocks.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel  * stream_.bufferSize );
      }
    }
  }
}
10414 
10415 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10416 {
10417   // This function does format conversion, input/output channel compensation, and
10418   // data interleaving/deinterleaving.  24-bit integers are assumed to occupy
10419   // the lower three bytes of a 32-bit integer.
10420 
10421   // Clear our duplex device output buffer if there are more device outputs than user outputs
10422   if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX && info.outJump > info.inJump )
10423     memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10424 
10425   int j;
10426   if (info.outFormat == RTAUDIO_FLOAT64) {
10427     Float64 *out = (Float64 *)outBuffer;
10428 
10429     if (info.inFormat == RTAUDIO_SINT8) {
10430       signed char *in = (signed char *)inBuffer;
10431       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10432         for (j=0; j<info.channels; j++) {
10433           out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 128.0;
10434         }
10435         in += info.inJump;
10436         out += info.outJump;
10437       }
10438     }
10439     else if (info.inFormat == RTAUDIO_SINT16) {
10440       Int16 *in = (Int16 *)inBuffer;
10441       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10442         for (j=0; j<info.channels; j++) {
10443           out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 32768.0;
10444         }
10445         in += info.inJump;
10446         out += info.outJump;
10447       }
10448     }
10449     else if (info.inFormat == RTAUDIO_SINT24) {
10450       Int24 *in = (Int24 *)inBuffer;
10451       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10452         for (j=0; j<info.channels; j++) {
10453           out[info.outOffset[j]] = (Float64) in[info.inOffset[j]].asInt() / 8388608.0;
10454         }
10455         in += info.inJump;
10456         out += info.outJump;
10457       }
10458     }
10459     else if (info.inFormat == RTAUDIO_SINT32) {
10460       Int32 *in = (Int32 *)inBuffer;
10461       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10462         for (j=0; j<info.channels; j++) {
10463           out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 2147483648.0;
10464         }
10465         in += info.inJump;
10466         out += info.outJump;
10467       }
10468     }
10469     else if (info.inFormat == RTAUDIO_FLOAT32) {
10470       Float32 *in = (Float32 *)inBuffer;
10471       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10472         for (j=0; j<info.channels; j++) {
10473           out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10474         }
10475         in += info.inJump;
10476         out += info.outJump;
10477       }
10478     }
10479     else if (info.inFormat == RTAUDIO_FLOAT64) {
10480       // Channel compensation and/or (de)interleaving only.
10481       Float64 *in = (Float64 *)inBuffer;
10482       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10483         for (j=0; j<info.channels; j++) {
10484           out[info.outOffset[j]] = in[info.inOffset[j]];
10485         }
10486         in += info.inJump;
10487         out += info.outJump;
10488       }
10489     }
10490   }
10491   else if (info.outFormat == RTAUDIO_FLOAT32) {
10492     Float32 *out = (Float32 *)outBuffer;
10493 
10494     if (info.inFormat == RTAUDIO_SINT8) {
10495       signed char *in = (signed char *)inBuffer;
10496       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10497         for (j=0; j<info.channels; j++) {
10498           out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 128.f;
10499         }
10500         in += info.inJump;
10501         out += info.outJump;
10502       }
10503     }
10504     else if (info.inFormat == RTAUDIO_SINT16) {
10505       Int16 *in = (Int16 *)inBuffer;
10506       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10507         for (j=0; j<info.channels; j++) {
10508           out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 32768.f;
10509         }
10510         in += info.inJump;
10511         out += info.outJump;
10512       }
10513     }
10514     else if (info.inFormat == RTAUDIO_SINT24) {
10515       Int24 *in = (Int24 *)inBuffer;
10516       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517         for (j=0; j<info.channels; j++) {
10518           out[info.outOffset[j]] = (Float32) in[info.inOffset[j]].asInt() / 8388608.f;
10519         }
10520         in += info.inJump;
10521         out += info.outJump;
10522       }
10523     }
10524     else if (info.inFormat == RTAUDIO_SINT32) {
10525       Int32 *in = (Int32 *)inBuffer;
10526       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10527         for (j=0; j<info.channels; j++) {
10528           out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 2147483648.f;
10529         }
10530         in += info.inJump;
10531         out += info.outJump;
10532       }
10533     }
10534     else if (info.inFormat == RTAUDIO_FLOAT32) {
10535       // Channel compensation and/or (de)interleaving only.
10536       Float32 *in = (Float32 *)inBuffer;
10537       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10538         for (j=0; j<info.channels; j++) {
10539           out[info.outOffset[j]] = in[info.inOffset[j]];
10540         }
10541         in += info.inJump;
10542         out += info.outJump;
10543       }
10544     }
10545     else if (info.inFormat == RTAUDIO_FLOAT64) {
10546       Float64 *in = (Float64 *)inBuffer;
10547       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10548         for (j=0; j<info.channels; j++) {
10549           out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10550         }
10551         in += info.inJump;
10552         out += info.outJump;
10553       }
10554     }
10555   }
10556   else if (info.outFormat == RTAUDIO_SINT32) {
10557     Int32 *out = (Int32 *)outBuffer;
10558     if (info.inFormat == RTAUDIO_SINT8) {
10559       signed char *in = (signed char *)inBuffer;
10560       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10561         for (j=0; j<info.channels; j++) {
10562           out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10563           out[info.outOffset[j]] <<= 24;
10564         }
10565         in += info.inJump;
10566         out += info.outJump;
10567       }
10568     }
10569     else if (info.inFormat == RTAUDIO_SINT16) {
10570       Int16 *in = (Int16 *)inBuffer;
10571       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10572         for (j=0; j<info.channels; j++) {
10573           out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10574           out[info.outOffset[j]] <<= 16;
10575         }
10576         in += info.inJump;
10577         out += info.outJump;
10578       }
10579     }
10580     else if (info.inFormat == RTAUDIO_SINT24) {
10581       Int24 *in = (Int24 *)inBuffer;
10582       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10583         for (j=0; j<info.channels; j++) {
10584           out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10585           out[info.outOffset[j]] <<= 8;
10586         }
10587         in += info.inJump;
10588         out += info.outJump;
10589       }
10590     }
10591     else if (info.inFormat == RTAUDIO_SINT32) {
10592       // Channel compensation and/or (de)interleaving only.
10593       Int32 *in = (Int32 *)inBuffer;
10594       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10595         for (j=0; j<info.channels; j++) {
10596           out[info.outOffset[j]] = in[info.inOffset[j]];
10597         }
10598         in += info.inJump;
10599         out += info.outJump;
10600       }
10601     }
10602     else if (info.inFormat == RTAUDIO_FLOAT32) {
10603       Float32 *in = (Float32 *)inBuffer;
10604       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10605         for (j=0; j<info.channels; j++) {
10606           // Use llround() which returns `long long` which is guaranteed to be at least 64 bits.
10607           out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.f), 2147483647LL);
10608         }
10609         in += info.inJump;
10610         out += info.outJump;
10611       }
10612     }
10613     else if (info.inFormat == RTAUDIO_FLOAT64) {
10614       Float64 *in = (Float64 *)inBuffer;
10615       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10616         for (j=0; j<info.channels; j++) {
10617           out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.0), 2147483647LL);
10618         }
10619         in += info.inJump;
10620         out += info.outJump;
10621       }
10622     }
10623   }
10624   else if (info.outFormat == RTAUDIO_SINT24) {
10625     Int24 *out = (Int24 *)outBuffer;
10626     if (info.inFormat == RTAUDIO_SINT8) {
10627       signed char *in = (signed char *)inBuffer;
10628       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10629         for (j=0; j<info.channels; j++) {
10630           out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10631           //out[info.outOffset[j]] <<= 16;
10632         }
10633         in += info.inJump;
10634         out += info.outJump;
10635       }
10636     }
10637     else if (info.inFormat == RTAUDIO_SINT16) {
10638       Int16 *in = (Int16 *)inBuffer;
10639       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10640         for (j=0; j<info.channels; j++) {
10641           out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10642           //out[info.outOffset[j]] <<= 8;
10643         }
10644         in += info.inJump;
10645         out += info.outJump;
10646       }
10647     }
10648     else if (info.inFormat == RTAUDIO_SINT24) {
10649       // Channel compensation and/or (de)interleaving only.
10650       Int24 *in = (Int24 *)inBuffer;
10651       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10652         for (j=0; j<info.channels; j++) {
10653           out[info.outOffset[j]] = in[info.inOffset[j]];
10654         }
10655         in += info.inJump;
10656         out += info.outJump;
10657       }
10658     }
10659     else if (info.inFormat == RTAUDIO_SINT32) {
10660       Int32 *in = (Int32 *)inBuffer;
10661       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10662         for (j=0; j<info.channels; j++) {
10663           out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10664           //out[info.outOffset[j]] >>= 8;
10665         }
10666         in += info.inJump;
10667         out += info.outJump;
10668       }
10669     }
10670     else if (info.inFormat == RTAUDIO_FLOAT32) {
10671       Float32 *in = (Float32 *)inBuffer;
10672       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10673         for (j=0; j<info.channels; j++) {
10674           out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.f), 8388607LL);
10675         }
10676         in += info.inJump;
10677         out += info.outJump;
10678       }
10679     }
10680     else if (info.inFormat == RTAUDIO_FLOAT64) {
10681       Float64 *in = (Float64 *)inBuffer;
10682       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10683         for (j=0; j<info.channels; j++) {
10684           out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.0), 8388607LL);
10685         }
10686         in += info.inJump;
10687         out += info.outJump;
10688       }
10689     }
10690   }
10691   else if (info.outFormat == RTAUDIO_SINT16) {
10692     Int16 *out = (Int16 *)outBuffer;
10693     if (info.inFormat == RTAUDIO_SINT8) {
10694       signed char *in = (signed char *)inBuffer;
10695       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10696         for (j=0; j<info.channels; j++) {
10697           out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10698           out[info.outOffset[j]] <<= 8;
10699         }
10700         in += info.inJump;
10701         out += info.outJump;
10702       }
10703     }
10704     else if (info.inFormat == RTAUDIO_SINT16) {
10705       // Channel compensation and/or (de)interleaving only.
10706       Int16 *in = (Int16 *)inBuffer;
10707       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10708         for (j=0; j<info.channels; j++) {
10709           out[info.outOffset[j]] = in[info.inOffset[j]];
10710         }
10711         in += info.inJump;
10712         out += info.outJump;
10713       }
10714     }
10715     else if (info.inFormat == RTAUDIO_SINT24) {
10716       Int24 *in = (Int24 *)inBuffer;
10717       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10718         for (j=0; j<info.channels; j++) {
10719           out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10720         }
10721         in += info.inJump;
10722         out += info.outJump;
10723       }
10724     }
10725     else if (info.inFormat == RTAUDIO_SINT32) {
10726       Int32 *in = (Int32 *)inBuffer;
10727       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10728         for (j=0; j<info.channels; j++) {
10729           out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10730         }
10731         in += info.inJump;
10732         out += info.outJump;
10733       }
10734     }
10735     else if (info.inFormat == RTAUDIO_FLOAT32) {
10736       Float32 *in = (Float32 *)inBuffer;
10737       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10738         for (j=0; j<info.channels; j++) {
10739           out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.f), 32767LL);
10740         }
10741         in += info.inJump;
10742         out += info.outJump;
10743       }
10744     }
10745     else if (info.inFormat == RTAUDIO_FLOAT64) {
10746       Float64 *in = (Float64 *)inBuffer;
10747       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10748         for (j=0; j<info.channels; j++) {
10749           out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.0), 32767LL);
10750         }
10751         in += info.inJump;
10752         out += info.outJump;
10753       }
10754     }
10755   }
10756   else if (info.outFormat == RTAUDIO_SINT8) {
10757     signed char *out = (signed char *)outBuffer;
10758     if (info.inFormat == RTAUDIO_SINT8) {
10759       // Channel compensation and/or (de)interleaving only.
10760       signed char *in = (signed char *)inBuffer;
10761       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10762         for (j=0; j<info.channels; j++) {
10763           out[info.outOffset[j]] = in[info.inOffset[j]];
10764         }
10765         in += info.inJump;
10766         out += info.outJump;
10767       }
10768     }
10769     if (info.inFormat == RTAUDIO_SINT16) {
10770       Int16 *in = (Int16 *)inBuffer;
10771       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10772         for (j=0; j<info.channels; j++) {
10773           out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10774         }
10775         in += info.inJump;
10776         out += info.outJump;
10777       }
10778     }
10779     else if (info.inFormat == RTAUDIO_SINT24) {
10780       Int24 *in = (Int24 *)inBuffer;
10781       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10782         for (j=0; j<info.channels; j++) {
10783           out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10784         }
10785         in += info.inJump;
10786         out += info.outJump;
10787       }
10788     }
10789     else if (info.inFormat == RTAUDIO_SINT32) {
10790       Int32 *in = (Int32 *)inBuffer;
10791       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10792         for (j=0; j<info.channels; j++) {
10793           out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10794         }
10795         in += info.inJump;
10796         out += info.outJump;
10797       }
10798     }
10799     else if (info.inFormat == RTAUDIO_FLOAT32) {
10800       Float32 *in = (Float32 *)inBuffer;
10801       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10802         for (j=0; j<info.channels; j++) {
10803           out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.f), 127LL);
10804         }
10805         in += info.inJump;
10806         out += info.outJump;
10807       }
10808     }
10809     else if (info.inFormat == RTAUDIO_FLOAT64) {
10810       Float64 *in = (Float64 *)inBuffer;
10811       for (unsigned int i=0; i<stream_.bufferSize; i++) {
10812         for (j=0; j<info.channels; j++) {
10813           out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.0), 127LL);
10814         }
10815         in += info.inJump;
10816         out += info.outJump;
10817       }
10818     }
10819   }
10820 }
10821 
10822 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10823 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10824 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10825 
10826 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10827 {
10828   char val;
10829   char *ptr;
10830 
10831   ptr = buffer;
10832   if ( format == RTAUDIO_SINT16 ) {
10833     for ( unsigned int i=0; i<samples; i++ ) {
10834       // Swap 1st and 2nd bytes.
10835       val = *(ptr);
10836       *(ptr) = *(ptr+1);
10837       *(ptr+1) = val;
10838 
10839       // Increment 2 bytes.
10840       ptr += 2;
10841     }
10842   }
10843   else if ( format == RTAUDIO_SINT32 ||
10844             format == RTAUDIO_FLOAT32 ) {
10845     for ( unsigned int i=0; i<samples; i++ ) {
10846       // Swap 1st and 4th bytes.
10847       val = *(ptr);
10848       *(ptr) = *(ptr+3);
10849       *(ptr+3) = val;
10850 
10851       // Swap 2nd and 3rd bytes.
10852       ptr += 1;
10853       val = *(ptr);
10854       *(ptr) = *(ptr+1);
10855       *(ptr+1) = val;
10856 
10857       // Increment 3 more bytes.
10858       ptr += 3;
10859     }
10860   }
10861   else if ( format == RTAUDIO_SINT24 ) {
10862     for ( unsigned int i=0; i<samples; i++ ) {
10863       // Swap 1st and 3rd bytes.
10864       val = *(ptr);
10865       *(ptr) = *(ptr+2);
10866       *(ptr+2) = val;
10867 
10868       // Increment 2 more bytes.
10869       ptr += 2;
10870     }
10871   }
10872   else if ( format == RTAUDIO_FLOAT64 ) {
10873     for ( unsigned int i=0; i<samples; i++ ) {
10874       // Swap 1st and 8th bytes
10875       val = *(ptr);
10876       *(ptr) = *(ptr+7);
10877       *(ptr+7) = val;
10878 
10879       // Swap 2nd and 7th bytes
10880       ptr += 1;
10881       val = *(ptr);
10882       *(ptr) = *(ptr+5);
10883       *(ptr+5) = val;
10884 
10885       // Swap 3rd and 6th bytes
10886       ptr += 1;
10887       val = *(ptr);
10888       *(ptr) = *(ptr+3);
10889       *(ptr+3) = val;
10890 
10891       // Swap 4th and 5th bytes
10892       ptr += 1;
10893       val = *(ptr);
10894       *(ptr) = *(ptr+1);
10895       *(ptr+1) = val;
10896 
10897       // Increment 5 more bytes.
10898       ptr += 5;
10899     }
10900   }
10901 }
10902 
10903   // Indentation settings for Vim and Emacs
10904   //
10905   // Local Variables:
10906   // c-basic-offset: 2
10907   // indent-tabs-mode: nil
10908   // End:
10909   //
10910   // vim: et sts=2 sw=2
10911 
10912