1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
15
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
23
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
26
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
31
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 */
40 /************************************************************************/
41
42 // RtAudio: Version 5.1.0
43
#include "RtAudio.hpp"
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
51
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
57 };
58
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
64
65 #include "tchar.h"
66
67 /********************/
68 #if !defined(DECL_MAYBE_UNUSED) && defined(__GNUC__)
69 #define DECL_MAYBE_UNUSED __attribute__((unused))
70 #elif !defined(DECL_MAYBE_UNUSED)
71 #define DECL_MAYBE_UNUSED
72 #endif
73 /********************/
74
75 DECL_MAYBE_UNUSED
// Helper (Windows builds): wrap a NUL-terminated narrow C string in a std::string.
static std::string convertCharPointerToStdString(const char *text)
{
  std::string result;
  result.assign( text );
  return result;
}
80
convertCharPointerToStdString(const wchar_t * text)81 static std::string convertCharPointerToStdString(const wchar_t *text)
82 {
83 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
84 std::string s( length-1, '\0' );
85 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
86 return s;
87 }
88
89 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
90 // pthread API
91 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
92 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
93 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
94 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
95 #else
96 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
97 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
98 #endif
99
100 // *************************************************** //
101 //
102 // RtAudio definitions.
103 //
104 // *************************************************** //
105
getVersion(void)106 std::string RtAudio :: getVersion( void )
107 {
108 return RTAUDIO_VERSION;
109 }
110
111 // Define API names and display names.
112 // Must be in same order as API enum.
extern "C" {
// Lowercase identifier / human-readable display name for every API value.
// Indexed directly by RtAudio::Api, so the row order MUST match the enum.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

// The order here will control the order of RtAudio's API search in
// the constructor.  Only the APIs enabled at compile time appear.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
  RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
  RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
  RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
  RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
#endif
  RtAudio::UNSPECIFIED,  // sentinel: keeps the array non-empty with no APIs compiled
};
// The -1 excludes the trailing UNSPECIFIED sentinel from the count.
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
164
165 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
166 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization has a
// public constructor, so instantiating StaticAssert<false> fails to compile.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
// Never instantiated at runtime; exists solely to force the compile-time check.
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
172
getCompiledApi(std::vector<RtAudio::Api> & apis)173 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
174 {
175 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
176 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
177 }
178
getApiName(RtAudio::Api api)179 std::string RtAudio :: getApiName( RtAudio::Api api )
180 {
181 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return "";
183 return rtaudio_api_names[api][0];
184 }
185
getApiDisplayName(RtAudio::Api api)186 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
187 {
188 if (api < 0 || api >= RtAudio::NUM_APIS)
189 return "Unknown";
190 return rtaudio_api_names[api][1];
191 }
192
getCompiledApiByName(const std::string & name)193 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
194 {
195 unsigned int i=0;
196 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
197 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
198 return rtaudio_compiled_apis[i];
199 return RtAudio::UNSPECIFIED;
200 }
201
// Replace the current backend (if any) with a fresh instance for 'api'.
// Leaves rtapi_ null when the requested API was not compiled into this build.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  // Drop any previously opened backend before creating a new one.
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
245
// Construct an RtAudio instance.  Tries the requested API first; if that is
// unavailable, falls back to the first compiled API that reports a device.
// Throws RtAudioError only if no API support was compiled in at all.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  // Note: if no API reported a device, rtapi_ still holds the last API
  // tried, so device-less systems get a usable (if empty) backend.
  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
278
~RtAudio()279 RtAudio :: ~RtAudio()
280 {
281 if ( rtapi_ )
282 delete rtapi_;
283 }
284
// Thin forwarding wrapper: all validation and device probing happens in
// RtApi::openStream() on the selected backend.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
{
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
}
297
298 // *************************************************** //
299 //
300 // Public RtApi definitions (see end of file for
301 // private or protected utility functions).
302 //
303 // *************************************************** //
304
// Base-class constructor: put the stream bookkeeping into a known
// "no stream open" state and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  // userBuffer[0]/[1] hold the playback/record conversion buffers; null
  // until probeDeviceOpen() allocates them.
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
316
// Base-class destructor: release the stream mutex created in the constructor.
// Subclasses are responsible for closing any open stream first.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
321
// Validate the user's stream request, then delegate the API-specific device
// setup to probeDeviceOpen() (implemented by each backend subclass).
// On any failure, errorText_ is set and error() is invoked; the stream stays
// closed.  On success the stream is left in the STREAM_STOPPED state.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  // Only one stream per RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unknown RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Range-check the requested device indices against the current device list.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the buffer count actually chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
414
// Default input device index.  Base-class fallback always returns 0;
// backends that can query the OS for a default override this.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
420
// Default output device index.  Base-class fallback always returns 0;
// backends that can query the OS for a default override this.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
426
closeStream(void)427 void RtApi :: closeStream( void )
428 {
429 // MUST be implemented in subclasses!
430 return;
431 }
432
// API-specific device setup hook called by RtApi::openStream().
// The base implementation always fails; every concrete backend MUST
// override this to open the device and populate stream_.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
441
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  // Advance the stream clock by one buffer's duration in seconds.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record the wall-clock moment of this tick so getStreamTime() can
  // interpolate between buffer callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
454
getStreamLatency(void)455 long RtApi :: getStreamLatency( void )
456 {
457 verifyStream();
458
459 long totalLatency = 0;
460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
461 totalLatency = stream_.latency[0];
462 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
463 totalLatency += stream_.latency[1];
464
465 return totalLatency;
466 }
467
// Current stream time in seconds.  When gettimeofday() is available, the
// coarse per-buffer clock is refined by the wall-clock time elapsed since
// the last tickStreamTime() call.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // No interpolation before the first tick or while the stream is idle.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
490
// Reset the stream time to 'time' (seconds).  Negative values are ignored,
// though the tick timestamp is still refreshed below.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Restart the interpolation baseline used by getStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
501
// Actual sample rate of the open stream (may differ from the requested
// rate if the device did not support it).  verifyStream() raises an
// error if no stream is open.
unsigned int RtApi :: getStreamSampleRate( void )
{
 verifyStream();

 return stream_.sampleRate;
}
508
509
510 // *************************************************** //
511 //
512 // OS/API-specific methods.
513 //
514 // *************************************************** //
515
516 #if defined(__MACOSX_CORE__)
517
518 // The OS X CoreAudio API is designed to use a separate callback
519 // procedure for each of its audio devices. A single RtAudio duplex
520 // stream using two different devices is supported here, though it
521 // cannot be guaranteed to always behave correctly because we cannot
522 // synchronize these two callbacks.
523 //
524 // A property listener is installed for over/underrun information.
525 // However, no functionality is currently provided to allow property
526 // listeners to trigger user handlers because it is unclear what could
527 // be done if a critical stream parameter (buffer size, sample rate,
528 // device disconnect) notification arrived. The listeners entail
529 // quite a bit of extra code and most likely, a user program wouldn't
530 // be prepared for the result anyway. However, we do provide a flag
531 // to the client callback function to inform of an over/underrun.
532
533 // A structure to hold various information related to the CoreAudio API
534 // implementation.
// Per-stream state for the CoreAudio backend.  The two-element arrays are
// indexed by direction: [0] = output/playback, [1] = input/capture
// (matching the stream_.latency[0]/[1] convention used elsewhere).
struct CoreHandle {
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // over/underrun flags reported to the user callback
  char *deviceBuffer;
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Default to one stream per direction with no device bound and no xruns.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
551
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  // Passing a NULL run loop tells CoreAudio to use its own thread for
  // property notifications instead of the process's main run loop.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
570
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
578
// Number of CoreAudio devices, computed from the byte size of the system
// object's device-list property.  Returns 0 (with a warning) on failure.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The property is an array of AudioDeviceIDs; divide to get the count.
  return dataSize / sizeof( AudioDeviceID );
}
593
getDefaultInputDevice(void)594 unsigned int RtApiCore :: getDefaultInputDevice( void )
595 {
596 unsigned int nDevices = getDeviceCount();
597 if ( nDevices <= 1 ) return 0;
598
599 AudioDeviceID id;
600 UInt32 dataSize = sizeof( AudioDeviceID );
601 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
602 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
603 if ( result != noErr ) {
604 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
605 error( RtAudioError::WARNING );
606 return 0;
607 }
608
609 dataSize *= nDevices;
610 AudioDeviceID deviceList[ nDevices ];
611 property.mSelector = kAudioHardwarePropertyDevices;
612 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
613 if ( result != noErr ) {
614 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
615 error( RtAudioError::WARNING );
616 return 0;
617 }
618
619 for ( unsigned int i=0; i<nDevices; i++ )
620 if ( id == deviceList[i] ) return i;
621
622 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
623 error( RtAudioError::WARNING );
624 return 0;
625 }
626
getDefaultOutputDevice(void)627 unsigned int RtApiCore :: getDefaultOutputDevice( void )
628 {
629 unsigned int nDevices = getDeviceCount();
630 if ( nDevices <= 1 ) return 0;
631
632 AudioDeviceID id;
633 UInt32 dataSize = sizeof( AudioDeviceID );
634 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
635 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
636 if ( result != noErr ) {
637 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
638 error( RtAudioError::WARNING );
639 return 0;
640 }
641
642 dataSize = sizeof( AudioDeviceID ) * nDevices;
643 AudioDeviceID deviceList[ nDevices ];
644 property.mSelector = kAudioHardwarePropertyDevices;
645 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
646 if ( result != noErr ) {
647 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
648 error( RtAudioError::WARNING );
649 return 0;
650 }
651
652 for ( unsigned int i=0; i<nDevices; i++ )
653 if ( id == deviceList[i] ) return i;
654
655 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
656 error( RtAudioError::WARNING );
657 return 0;
658 }
659
// Probe a CoreAudio device: name, channel counts, supported sample rates and
// native format.  On any CoreAudio error a partially filled DeviceInfo with
// probed == false is returned and a WARNING is issued.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // NOTE(review): variable-length array — a non-standard compiler extension;
  // consider std::vector if this is ever revisited.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  // Translate RtAudio's index into a CoreAudio device id.
  AudioDeviceID id = deviceList[ device ];

  // Get the device name, built as "<manufacturer>: <device name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // length * 3 + 1: worst-case bytes for UTF-8 expansion plus the NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration":
  // size query first, then allocate, then fetch the AudioBufferList.
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum the channels of every stream.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration" (same three-step pattern).
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  // NOTE(review): variable-length array — non-standard extension; see above.
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      // Intersect the ranges: take the largest minimum and smallest maximum.
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // For a continuous range, offer every standard rate that falls inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
900
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)901 static OSStatus callbackHandler( AudioDeviceID inDevice,
902 const AudioTimeStamp* /*inNow*/,
903 const AudioBufferList* inInputData,
904 const AudioTimeStamp* /*inInputTime*/,
905 AudioBufferList* outOutputData,
906 const AudioTimeStamp* /*inOutputTime*/,
907 void* infoPointer )
908 {
909 CallbackInfo *info = (CallbackInfo *) infoPointer;
910
911 RtApiCore *object = (RtApiCore *) info->object;
912 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
913 return kAudioHardwareUnspecifiedError;
914 else
915 return kAudioHardwareNoError;
916 }
917
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)918 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
919 UInt32 nAddresses,
920 const AudioObjectPropertyAddress properties[],
921 void* handlePointer )
922 {
923 CoreHandle *handle = (CoreHandle *) handlePointer;
924 for ( UInt32 i=0; i<nAddresses; i++ ) {
925 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
926 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
927 handle->xrun[1] = true;
928 else
929 handle->xrun[0] = true;
930 }
931 }
932
933 return kAudioHardwareNoError;
934 }
935
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)936 static OSStatus rateListener( AudioObjectID inDevice,
937 UInt32 /*nAddresses*/,
938 const AudioObjectPropertyAddress /*properties*/[],
939 void* ratePointer )
940 {
941 Float64 *rate = (Float64 *) ratePointer;
942 UInt32 dataSize = sizeof( Float64 );
943 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
944 kAudioObjectPropertyScopeGlobal,
945 kAudioObjectPropertyElementMaster };
946 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
947 return kAudioHardwareNoError;
948 }
949
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)950 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
951 unsigned int firstChannel, unsigned int sampleRate,
952 RtAudioFormat format, unsigned int *bufferSize,
953 RtAudio::StreamOptions *options )
954 {
955 // Get device ID
956 unsigned int nDevices = getDeviceCount();
957 if ( nDevices == 0 ) {
958 // This should not happen because a check is made before this function is called.
959 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
960 return FAILURE;
961 }
962
963 if ( device >= nDevices ) {
964 // This should not happen because a check is made before this function is called.
965 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
966 return FAILURE;
967 }
968
969 AudioDeviceID deviceList[ nDevices ];
970 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
971 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
972 kAudioObjectPropertyScopeGlobal,
973 kAudioObjectPropertyElementMaster };
974 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
975 0, NULL, &dataSize, (void *) &deviceList );
976 if ( result != noErr ) {
977 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
978 return FAILURE;
979 }
980
981 AudioDeviceID id = deviceList[ device ];
982
983 // Setup for stream mode.
984 bool isInput = false;
985 if ( mode == INPUT ) {
986 isInput = true;
987 property.mScope = kAudioDevicePropertyScopeInput;
988 }
989 else
990 property.mScope = kAudioDevicePropertyScopeOutput;
991
992 // Get the stream "configuration".
993 AudioBufferList *bufferList = nil;
994 dataSize = 0;
995 property.mSelector = kAudioDevicePropertyStreamConfiguration;
996 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
997 if ( result != noErr || dataSize == 0 ) {
998 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
999 errorText_ = errorStream_.str();
1000 return FAILURE;
1001 }
1002
1003 // Allocate the AudioBufferList.
1004 bufferList = (AudioBufferList *) malloc( dataSize );
1005 if ( bufferList == NULL ) {
1006 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1007 return FAILURE;
1008 }
1009
1010 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1011 if (result != noErr || dataSize == 0) {
1012 free( bufferList );
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1014 errorText_ = errorStream_.str();
1015 return FAILURE;
1016 }
1017
1018 // Search for one or more streams that contain the desired number of
1019 // channels. CoreAudio devices can have an arbitrary number of
1020 // streams and each stream can have an arbitrary number of channels.
1021 // For each stream, a single buffer of interleaved samples is
1022 // provided. RtAudio prefers the use of one stream of interleaved
1023 // data or multiple consecutive single-channel streams. However, we
1024 // now support multiple consecutive multi-channel streams of
1025 // interleaved data as well.
1026 UInt32 iStream, offsetCounter = firstChannel;
1027 UInt32 nStreams = bufferList->mNumberBuffers;
1028 bool monoMode = false;
1029 bool foundStream = false;
1030
1031 // First check that the device supports the requested number of
1032 // channels.
1033 UInt32 deviceChannels = 0;
1034 for ( iStream=0; iStream<nStreams; iStream++ )
1035 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1036
1037 if ( deviceChannels < ( channels + firstChannel ) ) {
1038 free( bufferList );
1039 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1040 errorText_ = errorStream_.str();
1041 return FAILURE;
1042 }
1043
1044 // Look for a single stream meeting our needs.
1045 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1046 for ( iStream=0; iStream<nStreams; iStream++ ) {
1047 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1048 if ( streamChannels >= channels + offsetCounter ) {
1049 firstStream = iStream;
1050 channelOffset = offsetCounter;
1051 foundStream = true;
1052 break;
1053 }
1054 if ( streamChannels > offsetCounter ) break;
1055 offsetCounter -= streamChannels;
1056 }
1057
1058 // If we didn't find a single stream above, then we should be able
1059 // to meet the channel specification with multiple streams.
1060 if ( foundStream == false ) {
1061 monoMode = true;
1062 offsetCounter = firstChannel;
1063 for ( iStream=0; iStream<nStreams; iStream++ ) {
1064 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1065 if ( streamChannels > offsetCounter ) break;
1066 offsetCounter -= streamChannels;
1067 }
1068
1069 firstStream = iStream;
1070 channelOffset = offsetCounter;
1071 Int32 channelCounter = channels + offsetCounter - streamChannels;
1072
1073 if ( streamChannels > 1 ) monoMode = false;
1074 while ( channelCounter > 0 ) {
1075 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1076 if ( streamChannels > 1 ) monoMode = false;
1077 channelCounter -= streamChannels;
1078 streamCount++;
1079 }
1080 }
1081
1082 free( bufferList );
1083
1084 // Determine the buffer size.
1085 AudioValueRange bufferRange;
1086 dataSize = sizeof( AudioValueRange );
1087 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1088 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1089
1090 if ( result != noErr ) {
1091 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1092 errorText_ = errorStream_.str();
1093 return FAILURE;
1094 }
1095
1096 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1097 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1098 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1099
1100 // Set the buffer size. For multiple streams, I'm assuming we only
1101 // need to make this setting for the master channel.
1102 UInt32 theSize = (UInt32) *bufferSize;
1103 dataSize = sizeof( UInt32 );
1104 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1105 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1106
1107 if ( result != noErr ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1109 errorText_ = errorStream_.str();
1110 return FAILURE;
1111 }
1112
1113 // If attempting to setup a duplex stream, the bufferSize parameter
1114 // MUST be the same in both directions!
1115 *bufferSize = theSize;
1116 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1117 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1118 errorText_ = errorStream_.str();
1119 return FAILURE;
1120 }
1121
1122 stream_.bufferSize = *bufferSize;
1123 stream_.nBuffers = 1;
1124
1125 // Try to set "hog" mode ... it's not clear to me this is working.
1126 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1127 pid_t hog_pid;
1128 dataSize = sizeof( hog_pid );
1129 property.mSelector = kAudioDevicePropertyHogMode;
1130 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1133 errorText_ = errorStream_.str();
1134 return FAILURE;
1135 }
1136
1137 if ( hog_pid != getpid() ) {
1138 hog_pid = getpid();
1139 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1140 if ( result != noErr ) {
1141 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1142 errorText_ = errorStream_.str();
1143 return FAILURE;
1144 }
1145 }
1146 }
1147
1148 // Check and if necessary, change the sample rate for the device.
1149 Float64 nominalRate;
1150 dataSize = sizeof( Float64 );
1151 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1152 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1153 if ( result != noErr ) {
1154 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1155 errorText_ = errorStream_.str();
1156 return FAILURE;
1157 }
1158
1159 // Only change the sample rate if off by more than 1 Hz.
1160 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1161
1162 // Set a property listener for the sample rate change
1163 Float64 reportedRate = 0.0;
1164 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1165 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 if ( result != noErr ) {
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1169 return FAILURE;
1170 }
1171
1172 nominalRate = (Float64) sampleRate;
1173 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1174 if ( result != noErr ) {
1175 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1176 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1177 errorText_ = errorStream_.str();
1178 return FAILURE;
1179 }
1180
1181 // Now wait until the reported nominal rate is what we just set.
1182 UInt32 microCounter = 0;
1183 while ( reportedRate != nominalRate ) {
1184 microCounter += 5000;
1185 if ( microCounter > 5000000 ) break;
1186 usleep( 5000 );
1187 }
1188
1189 // Remove the property listener.
1190 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1191
1192 if ( microCounter > 5000000 ) {
1193 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1194 errorText_ = errorStream_.str();
1195 return FAILURE;
1196 }
1197 }
1198
1199 // Now set the stream format for all streams. Also, check the
1200 // physical format of the device and change that if necessary.
1201 AudioStreamBasicDescription description;
1202 dataSize = sizeof( AudioStreamBasicDescription );
1203 property.mSelector = kAudioStreamPropertyVirtualFormat;
1204 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1205 if ( result != noErr ) {
1206 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1207 errorText_ = errorStream_.str();
1208 return FAILURE;
1209 }
1210
1211 // Set the sample rate and data format id. However, only make the
1212 // change if the sample rate is not within 1.0 of the desired
1213 // rate and the format is not linear pcm.
1214 bool updateFormat = false;
1215 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1216 description.mSampleRate = (Float64) sampleRate;
1217 updateFormat = true;
1218 }
1219
1220 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1221 description.mFormatID = kAudioFormatLinearPCM;
1222 updateFormat = true;
1223 }
1224
1225 if ( updateFormat ) {
1226 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1230 return FAILURE;
1231 }
1232 }
1233
1234 // Now check the physical format.
1235 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1236 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1237 if ( result != noErr ) {
1238 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1239 errorText_ = errorStream_.str();
1240 return FAILURE;
1241 }
1242
1243 //std::cout << "Current physical stream format:" << std::endl;
1244 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1245 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1246 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1247 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1248
1249 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1250 description.mFormatID = kAudioFormatLinearPCM;
1251 //description.mSampleRate = (Float64) sampleRate;
1252 AudioStreamBasicDescription testDescription = description;
1253 UInt32 formatFlags;
1254
1255 // We'll try higher bit rates first and then work our way down.
1256 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1259 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1262 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1263 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1264 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1265 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1266 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1267 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1268 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1269
1270 bool setPhysicalFormat = false;
1271 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1272 testDescription = description;
1273 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1274 testDescription.mFormatFlags = physicalFormats[i].second;
1275 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1276 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1277 else
1278 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1279 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1280 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1281 if ( result == noErr ) {
1282 setPhysicalFormat = true;
1283 //std::cout << "Updated physical stream format:" << std::endl;
1284 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1285 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1286 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1287 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1288 break;
1289 }
1290 }
1291
1292 if ( !setPhysicalFormat ) {
1293 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1294 errorText_ = errorStream_.str();
1295 return FAILURE;
1296 }
1297 } // done setting virtual/physical formats.
1298
1299 // Get the stream / device latency.
1300 UInt32 latency;
1301 dataSize = sizeof( UInt32 );
1302 property.mSelector = kAudioDevicePropertyLatency;
1303 if ( AudioObjectHasProperty( id, &property ) == true ) {
1304 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1305 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1306 else {
1307 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1308 errorText_ = errorStream_.str();
1309 error( RtAudioError::WARNING );
1310 }
1311 }
1312
1313 // Byte-swapping: According to AudioHardware.h, the stream data will
1314 // always be presented in native-endian format, so we should never
1315 // need to byte swap.
1316 stream_.doByteSwap[mode] = false;
1317
1318 // From the CoreAudio documentation, PCM data must be supplied as
1319 // 32-bit floats.
1320 stream_.userFormat = format;
1321 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1322
1323 if ( streamCount == 1 )
1324 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1325 else // multiple streams
1326 stream_.nDeviceChannels[mode] = channels;
1327 stream_.nUserChannels[mode] = channels;
1328 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1329 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1330 else stream_.userInterleaved = true;
1331 stream_.deviceInterleaved[mode] = true;
1332 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1333
1334 // Set flags for buffer conversion.
1335 stream_.doConvertBuffer[mode] = false;
1336 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1337 stream_.doConvertBuffer[mode] = true;
1338 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1339 stream_.doConvertBuffer[mode] = true;
1340 if ( streamCount == 1 ) {
1341 if ( stream_.nUserChannels[mode] > 1 &&
1342 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1343 stream_.doConvertBuffer[mode] = true;
1344 }
1345 else if ( monoMode && stream_.userInterleaved )
1346 stream_.doConvertBuffer[mode] = true;
1347
1348 // Allocate our CoreHandle structure for the stream.
1349 CoreHandle *handle = 0;
1350 if ( stream_.apiHandle == 0 ) {
1351 try {
1352 handle = new CoreHandle;
1353 }
1354 catch ( std::bad_alloc& ) {
1355 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1356 goto error;
1357 }
1358
1359 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1360 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1361 goto error;
1362 }
1363 stream_.apiHandle = (void *) handle;
1364 }
1365 else
1366 handle = (CoreHandle *) stream_.apiHandle;
1367 handle->iStream[mode] = firstStream;
1368 handle->nStreams[mode] = streamCount;
1369 handle->id[mode] = id;
1370
1371 // Allocate necessary internal buffers.
1372 unsigned long bufferBytes;
1373 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1374 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1375 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1376 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1377 if ( stream_.userBuffer[mode] == NULL ) {
1378 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1379 goto error;
1380 }
1381
1382 // If possible, we will make use of the CoreAudio stream buffers as
1383 // "device buffers". However, we can't do this if using multiple
1384 // streams.
1385 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1386
1387 bool makeBuffer = true;
1388 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1389 if ( mode == INPUT ) {
1390 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1391 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1392 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1393 }
1394 }
1395
1396 if ( makeBuffer ) {
1397 bufferBytes *= *bufferSize;
1398 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1399 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1400 if ( stream_.deviceBuffer == NULL ) {
1401 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1402 goto error;
1403 }
1404 }
1405 }
1406
1407 stream_.sampleRate = sampleRate;
1408 stream_.device[mode] = device;
1409 stream_.state = STREAM_STOPPED;
1410 stream_.callbackInfo.object = (void *) this;
1411
1412 // Setup the buffer conversion information structure.
1413 if ( stream_.doConvertBuffer[mode] ) {
1414 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1415 else setConvertInfo( mode, channelOffset );
1416 }
1417
1418 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1419 // Only one callback procedure per device.
1420 stream_.mode = DUPLEX;
1421 else {
1422 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1423 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1424 #else
1425 // deprecated in favor of AudioDeviceCreateIOProcID()
1426 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1427 #endif
1428 if ( result != noErr ) {
1429 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1430 errorText_ = errorStream_.str();
1431 goto error;
1432 }
1433 if ( stream_.mode == OUTPUT && mode == INPUT )
1434 stream_.mode = DUPLEX;
1435 else
1436 stream_.mode = mode;
1437 }
1438
1439 // Setup the device property listener for over/underload.
1440 property.mSelector = kAudioDeviceProcessorOverload;
1441 property.mScope = kAudioObjectPropertyScopeGlobal;
1442 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1443
1444 return SUCCESS;
1445
1446 error:
1447 if ( handle ) {
1448 pthread_cond_destroy( &handle->condition );
1449 delete handle;
1450 stream_.apiHandle = 0;
1451 }
1452
1453 for ( int i=0; i<2; i++ ) {
1454 if ( stream_.userBuffer[i] ) {
1455 free( stream_.userBuffer[i] );
1456 stream_.userBuffer[i] = 0;
1457 }
1458 }
1459
1460 if ( stream_.deviceBuffer ) {
1461 free( stream_.deviceBuffer );
1462 stream_.deviceBuffer = 0;
1463 }
1464
1465 stream_.state = STREAM_CLOSED;
1466 return FAILURE;
1467 }
1468
closeStream(void)1469 void RtApiCore :: closeStream( void )
1470 {
1471 if ( stream_.state == STREAM_CLOSED ) {
1472 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1473 error( RtAudioError::WARNING );
1474 return;
1475 }
1476
1477 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1479 if (handle) {
1480 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1481 kAudioObjectPropertyScopeGlobal,
1482 kAudioObjectPropertyElementMaster };
1483
1484 property.mSelector = kAudioDeviceProcessorOverload;
1485 property.mScope = kAudioObjectPropertyScopeGlobal;
1486 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1487 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1488 error( RtAudioError::WARNING );
1489 }
1490 }
1491 if ( stream_.state == STREAM_RUNNING )
1492 AudioDeviceStop( handle->id[0], callbackHandler );
1493 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1494 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1495 #else
1496 // deprecated in favor of AudioDeviceDestroyIOProcID()
1497 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1498 #endif
1499 }
1500
1501 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502 if (handle) {
1503 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1504 kAudioObjectPropertyScopeGlobal,
1505 kAudioObjectPropertyElementMaster };
1506
1507 property.mSelector = kAudioDeviceProcessorOverload;
1508 property.mScope = kAudioObjectPropertyScopeGlobal;
1509 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1510 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1511 error( RtAudioError::WARNING );
1512 }
1513 }
1514 if ( stream_.state == STREAM_RUNNING )
1515 AudioDeviceStop( handle->id[1], callbackHandler );
1516 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1517 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1518 #else
1519 // deprecated in favor of AudioDeviceDestroyIOProcID()
1520 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1521 #endif
1522 }
1523
1524 for ( int i=0; i<2; i++ ) {
1525 if ( stream_.userBuffer[i] ) {
1526 free( stream_.userBuffer[i] );
1527 stream_.userBuffer[i] = 0;
1528 }
1529 }
1530
1531 if ( stream_.deviceBuffer ) {
1532 free( stream_.deviceBuffer );
1533 stream_.deviceBuffer = 0;
1534 }
1535
1536 // Destroy pthread condition variable.
1537 pthread_cond_destroy( &handle->condition );
1538 delete handle;
1539 stream_.apiHandle = 0;
1540
1541 stream_.mode = UNINITIALIZED;
1542 stream_.state = STREAM_CLOSED;
1543 }
1544
startStream(void)1545 void RtApiCore :: startStream( void )
1546 {
1547 verifyStream();
1548 if ( stream_.state == STREAM_RUNNING ) {
1549 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1550 error( RtAudioError::WARNING );
1551 return;
1552 }
1553
1554 #if defined( HAVE_GETTIMEOFDAY )
1555 gettimeofday( &stream_.lastTickTimestamp, NULL );
1556 #endif
1557
1558 OSStatus result = noErr;
1559 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1560 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1561
1562 result = AudioDeviceStart( handle->id[0], callbackHandler );
1563 if ( result != noErr ) {
1564 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1565 errorText_ = errorStream_.str();
1566 goto unlock;
1567 }
1568 }
1569
1570 if ( stream_.mode == INPUT ||
1571 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1572
1573 result = AudioDeviceStart( handle->id[1], callbackHandler );
1574 if ( result != noErr ) {
1575 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1576 errorText_ = errorStream_.str();
1577 goto unlock;
1578 }
1579 }
1580
1581 handle->drainCounter = 0;
1582 handle->internalDrain = false;
1583 stream_.state = STREAM_RUNNING;
1584
1585 unlock:
1586 if ( result == noErr ) return;
1587 error( RtAudioError::SYSTEM_ERROR );
1588 }
1589
// Stop audio i/o on the stream's device(s).  If an output drain has not
// already been requested (drainCounter == 0), request one and wait for
// the callback to signal that the remaining output has been played
// before stopping the device.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      // drainCounter == 2 tells the output callback to drain and then
      // signal this condition (see abortStream(), which pre-sets it to
      // skip this wait).
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // calling thread before pthread_cond_wait(); it is not locked
      // here.  This matches this backend's historical behavior, but
      // should be confirmed against how callbackEvent() signals the
      // condition before changing it.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // A duplex stream on two distinct devices needs its input device
  // stopped separately.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1632
abortStream(void)1633 void RtApiCore :: abortStream( void )
1634 {
1635 verifyStream();
1636 if ( stream_.state == STREAM_STOPPED ) {
1637 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1638 error( RtAudioError::WARNING );
1639 return;
1640 }
1641
1642 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1643 handle->drainCounter = 2;
1644
1645 stopStream();
1646 }
1647
1648 // This function will be called by a spawned thread when the user
1649 // callback function signals that the stream should be stopped or
1650 // aborted. It is better to handle it this way because the
1651 // callbackEvent() function probably should return before the AudioDeviceStop()
1652 // function is called.
coreStopStream(void * ptr)1653 static void *coreStopStream( void *ptr )
1654 {
1655 CallbackInfo *info = (CallbackInfo *) ptr;
1656 RtApiCore *object = (RtApiCore *) info->object;
1657
1658 object->stopStream();
1659 pthread_exit( NULL );
1660 }
1661
// Core of the CoreAudio I/O cycle.  Invoked once per device per cycle:
// fires the user callback (on the output device for duplex streams),
// then moves audio between the user buffer(s) and the CoreAudio
// stream buffer(s), handling format conversion, (de)interleaving and
// multi-stream devices.  Returns SUCCESS normally; FAILURE only if
// called while the stream is closed.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was requested from within the callback (return value 1):
      // stop from a spawned thread so this callback can return first.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flagged since the last cycle.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort (no draining).
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a stop after draining remaining output.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  // ---- Output side: copy/convert user data into the device buffers. ----
  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        // Convert into the intermediate device buffer first, then
        // scatter from there into the individual streams.
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the distance between successive samples of one
        // channel in the source buffer.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Per-frame copy; the inner loop index shadows the outer
          // stream index 'i', which is not used inside.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // ---- Input side: copy/convert device buffers into the user buffer. ----
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset is the distance between successive samples of one
        // channel in the destination buffer.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // Per-frame copy; the inner loop index shadows the outer
          // stream index 'i', which is not used inside.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1924
getErrorCode(OSStatus code)1925 const char* RtApiCore :: getErrorCode( OSStatus code )
1926 {
1927 switch( code ) {
1928
1929 case kAudioHardwareNotRunningError:
1930 return "kAudioHardwareNotRunningError";
1931
1932 case kAudioHardwareUnspecifiedError:
1933 return "kAudioHardwareUnspecifiedError";
1934
1935 case kAudioHardwareUnknownPropertyError:
1936 return "kAudioHardwareUnknownPropertyError";
1937
1938 case kAudioHardwareBadPropertySizeError:
1939 return "kAudioHardwareBadPropertySizeError";
1940
1941 case kAudioHardwareIllegalOperationError:
1942 return "kAudioHardwareIllegalOperationError";
1943
1944 case kAudioHardwareBadObjectError:
1945 return "kAudioHardwareBadObjectError";
1946
1947 case kAudioHardwareBadDeviceError:
1948 return "kAudioHardwareBadDeviceError";
1949
1950 case kAudioHardwareBadStreamError:
1951 return "kAudioHardwareBadStreamError";
1952
1953 case kAudioHardwareUnsupportedOperationError:
1954 return "kAudioHardwareUnsupportedOperationError";
1955
1956 case kAudioDeviceUnsupportedFormatError:
1957 return "kAudioDeviceUnsupportedFormatError";
1958
1959 case kAudioDevicePermissionsError:
1960 return "kAudioDevicePermissionsError";
1961
1962 default:
1963 return "CoreAudio unknown error";
1964 }
1965 }
1966
1967 //******************** End of __MACOSX_CORE__ *********************//
1968 #endif
1969
1970 #if defined(__UNIX_JACK__)
1971
1972 // JACK is a low-latency audio server, originally written for the
1973 // GNU/Linux operating system and now also ported to OS-X. It can
1974 // connect a number of different applications to an audio device, as
1975 // well as allowing them to share audio between themselves.
1976 //
1977 // When using JACK with RtAudio, "devices" refer to JACK clients that
1978 // have ports connected to the server. The JACK server is typically
1979 // started in a terminal as follows:
1980 //
//    jackd -d alsa -d hw:0
1982 //
1983 // or through an interface program such as qjackctl. Many of the
1984 // parameters normally set for a stream are fixed by the JACK server
1985 // and can be specified when the JACK server is started. In
1986 // particular,
1987 //
//    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1989 //
1990 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1991 // frames, and number of buffers = 4. Once the server is running, it
1992 // is not possible to override these values. If the values are not
1993 // specified in the command-line, the JACK server uses default values.
1994 //
1995 // The JACK server does not have to be running when an instance of
1996 // RtApiJack is created, though the function getDeviceCount() will
1997 // report 0 devices found until JACK has been started. When no
1998 // devices are available (i.e., the JACK server is not running), a
1999 // stream cannot be opened.
2000
2001 #include <jack/jack.h>
2002 #include <unistd.h>
2003 #include <cstdio>
2004
2005 // A structure to hold various information related to the Jack API
2006 // implementation.
struct JackHandle {
  jack_client_t *client;       // JACK client connection, shared by both stream directions
  jack_port_t **ports[2];      // registered ports; index 0 = output (playback), 1 = input (capture)
  std::string deviceName[2];   // resolved device (client) name for each direction
  bool xrun[2];                // over/underrun flags, set by the jackXrun() callback
  pthread_cond_t condition;    // initialized in probeDeviceOpen(); used to coordinate output draining with stopStream()
  int drainCounter;            // Tracks callback counts when draining
  bool internalDrain;          // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
2019
2020 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed in non-debug builds to suppress JACK's
// internal error reporting (see RtApiJack constructor).
static void jackSilentError( const char * ) {}
2022 #endif
2023
// Constructor: autoconnect is enabled by default (may be disabled via
// the RTAUDIO_JACK_DONT_CONNECT stream option in probeDeviceOpen()).
RtApiJack :: RtApiJack()
:shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2032
~RtApiJack()2033 RtApiJack :: ~RtApiJack()
2034 {
2035 if ( stream_.state != STREAM_CLOSED ) closeStream();
2036 }
2037
getDeviceCount(void)2038 unsigned int RtApiJack :: getDeviceCount( void )
2039 {
2040 // See if we can become a jack client.
2041 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2042 jack_status_t *status = NULL;
2043 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2044 if ( client == 0 ) return 0;
2045
2046 const char **ports;
2047 std::string port, previousPort;
2048 unsigned int nChannels = 0, nDevices = 0;
2049 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2050 if ( ports ) {
2051 // Parse the port names up to the first colon (:).
2052 size_t iColon = 0;
2053 do {
2054 port = (char *) ports[ nChannels ];
2055 iColon = port.find(":");
2056 if ( iColon != std::string::npos ) {
2057 port = port.substr( 0, iColon + 1 );
2058 if ( port != previousPort ) {
2059 nDevices++;
2060 previousPort = port;
2061 }
2062 }
2063 } while ( ports[++nChannels] );
2064 free( ports );
2065 }
2066
2067 jack_client_close( client );
2068 return nDevices;
2069 }
2070
// Return device information for the JACK "device" (client) with the
// given index.  A temporary JACK client connection is opened to query
// the server; info.probed remains false on any failure.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).  Each distinct
    // prefix is one client; the device index selects among them.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.  The server rate is fixed
  // at server start, so it is the only supported rate reported.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2163
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2164 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2165 {
2166 CallbackInfo *info = (CallbackInfo *) infoPointer;
2167
2168 RtApiJack *object = (RtApiJack *) info->object;
2169 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2170
2171 return 0;
2172 }
2173
2174 // This function will be called by a spawned thread when the Jack
2175 // server signals that it is shutting down. It is necessary to handle
2176 // it this way because the jackShutdown() function must return before
2177 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2178 static void *jackCloseStream( void *ptr )
2179 {
2180 CallbackInfo *info = (CallbackInfo *) ptr;
2181 RtApiJack *object = (RtApiJack *) info->object;
2182
2183 object->closeStream();
2184
2185 pthread_exit( NULL );
2186 }
jackShutdown(void * infoPointer)2187 static void jackShutdown( void *infoPointer )
2188 {
2189 CallbackInfo *info = (CallbackInfo *) infoPointer;
2190 RtApiJack *object = (RtApiJack *) info->object;
2191
2192 // Check current stream state. If stopped, then we'll assume this
2193 // was called as a result of a call to RtApiJack::stopStream (the
2194 // deactivation of a client handle causes this function to be called).
2195 // If not, we'll assume the Jack server is shutting down or some
2196 // other problem occurred and we should close the stream.
2197 if ( object->isStreamRunning() == false ) return;
2198
2199 ThreadHandle threadId;
2200 pthread_create( &threadId, NULL, jackCloseStream, info );
2201 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2202 }
2203
jackXrun(void * infoPointer)2204 static int jackXrun( void *infoPointer )
2205 {
2206 JackHandle *handle = *((JackHandle **) infoPointer);
2207
2208 if ( handle->ports[0] ) handle->xrun[0] = true;
2209 if ( handle->ports[1] ) handle->xrun[1] = true;
2210
2211 return 0;
2212 }
2213
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2214 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2215 unsigned int firstChannel, unsigned int sampleRate,
2216 RtAudioFormat format, unsigned int *bufferSize,
2217 RtAudio::StreamOptions *options )
2218 {
2219 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2220
2221 // Look for jack server and try to become a client (only do once per stream).
2222 jack_client_t *client = 0;
2223 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2224 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2225 jack_status_t *status = NULL;
2226 if ( options && !options->streamName.empty() )
2227 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2228 else
2229 client = jack_client_open( "RtApiJack", jackoptions, status );
2230 if ( client == 0 ) {
2231 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2232 error( RtAudioError::WARNING );
2233 return FAILURE;
2234 }
2235 }
2236 else {
2237 // The handle must have been created on an earlier pass.
2238 client = handle->client;
2239 }
2240
2241 const char **ports;
2242 std::string port, previousPort, deviceName;
2243 unsigned int nPorts = 0, nDevices = 0;
2244 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2245 if ( ports ) {
2246 // Parse the port names up to the first colon (:).
2247 size_t iColon = 0;
2248 do {
2249 port = (char *) ports[ nPorts ];
2250 iColon = port.find(":");
2251 if ( iColon != std::string::npos ) {
2252 port = port.substr( 0, iColon );
2253 if ( port != previousPort ) {
2254 if ( nDevices == device ) deviceName = port;
2255 nDevices++;
2256 previousPort = port;
2257 }
2258 }
2259 } while ( ports[++nPorts] );
2260 free( ports );
2261 }
2262
2263 if ( device >= nDevices ) {
2264 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2265 return FAILURE;
2266 }
2267
2268 unsigned long flag = JackPortIsInput;
2269 if ( mode == INPUT ) flag = JackPortIsOutput;
2270
2271 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2272 // Count the available ports containing the client name as device
2273 // channels. Jack "input ports" equal RtAudio output channels.
2274 unsigned int nChannels = 0;
2275 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2276 if ( ports ) {
2277 while ( ports[ nChannels ] ) nChannels++;
2278 free( ports );
2279 }
2280 // Compare the jack ports for specified client to the requested number of channels.
2281 if ( nChannels < (channels + firstChannel) ) {
2282 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2283 errorText_ = errorStream_.str();
2284 return FAILURE;
2285 }
2286 }
2287
2288 // Check the jack server sample rate.
2289 unsigned int jackRate = jack_get_sample_rate( client );
2290 if ( sampleRate != jackRate ) {
2291 jack_client_close( client );
2292 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2293 errorText_ = errorStream_.str();
2294 return FAILURE;
2295 }
2296 stream_.sampleRate = jackRate;
2297
2298 // Get the latency of the JACK port.
2299 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2300 if ( ports[ firstChannel ] ) {
2301 // Added by Ge Wang
2302 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2303 // the range (usually the min and max are equal)
2304 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2305 // get the latency range
2306 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2307 // be optimistic, use the min!
2308 stream_.latency[mode] = latrange.min;
2309 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2310 }
2311 free( ports );
2312
2313 // The jack server always uses 32-bit floating-point data.
2314 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2315 stream_.userFormat = format;
2316
2317 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2318 else stream_.userInterleaved = true;
2319
2320 // Jack always uses non-interleaved buffers.
2321 stream_.deviceInterleaved[mode] = false;
2322
2323 // Jack always provides host byte-ordered data.
2324 stream_.doByteSwap[mode] = false;
2325
2326 // Get the buffer size. The buffer size and number of buffers
2327 // (periods) is set when the jack server is started.
2328 stream_.bufferSize = (int) jack_get_buffer_size( client );
2329 *bufferSize = stream_.bufferSize;
2330
2331 stream_.nDeviceChannels[mode] = channels;
2332 stream_.nUserChannels[mode] = channels;
2333
2334 // Set flags for buffer conversion.
2335 stream_.doConvertBuffer[mode] = false;
2336 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2337 stream_.doConvertBuffer[mode] = true;
2338 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2339 stream_.nUserChannels[mode] > 1 )
2340 stream_.doConvertBuffer[mode] = true;
2341
2342 // Allocate our JackHandle structure for the stream.
2343 if ( handle == 0 ) {
2344 try {
2345 handle = new JackHandle;
2346 }
2347 catch ( std::bad_alloc& ) {
2348 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2349 goto error;
2350 }
2351
2352 if ( pthread_cond_init(&handle->condition, NULL) ) {
2353 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2354 goto error;
2355 }
2356 stream_.apiHandle = (void *) handle;
2357 handle->client = client;
2358 }
2359 handle->deviceName[mode] = deviceName;
2360
2361 // Allocate necessary internal buffers.
2362 unsigned long bufferBytes;
2363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2364 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2365 if ( stream_.userBuffer[mode] == NULL ) {
2366 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2367 goto error;
2368 }
2369
2370 if ( stream_.doConvertBuffer[mode] ) {
2371
2372 bool makeBuffer = true;
2373 if ( mode == OUTPUT )
2374 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2375 else { // mode == INPUT
2376 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2377 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2378 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2379 if ( bufferBytes < bytesOut ) makeBuffer = false;
2380 }
2381 }
2382
2383 if ( makeBuffer ) {
2384 bufferBytes *= *bufferSize;
2385 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2386 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2387 if ( stream_.deviceBuffer == NULL ) {
2388 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2389 goto error;
2390 }
2391 }
2392 }
2393
2394 // Allocate memory for the Jack ports (channels) identifiers.
2395 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2396 if ( handle->ports[mode] == NULL ) {
2397 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2398 goto error;
2399 }
2400
2401 stream_.device[mode] = device;
2402 stream_.channelOffset[mode] = firstChannel;
2403 stream_.state = STREAM_STOPPED;
2404 stream_.callbackInfo.object = (void *) this;
2405
2406 if ( stream_.mode == OUTPUT && mode == INPUT )
2407 // We had already set up the stream for output.
2408 stream_.mode = DUPLEX;
2409 else {
2410 stream_.mode = mode;
2411 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2412 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2413 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2414 }
2415
2416 // Register our ports.
2417 char label[64];
2418 if ( mode == OUTPUT ) {
2419 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2420 snprintf( label, 64, "outport %d", i );
2421 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2422 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2423 }
2424 }
2425 else {
2426 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2427 snprintf( label, 64, "inport %d", i );
2428 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2429 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2430 }
2431 }
2432
2433 // Setup the buffer conversion information structure. We don't use
2434 // buffers to do channel offsets, so we override that parameter
2435 // here.
2436 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2437
2438 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2439
2440 return SUCCESS;
2441
2442 error:
2443 if ( handle ) {
2444 pthread_cond_destroy( &handle->condition );
2445 jack_client_close( handle->client );
2446
2447 if ( handle->ports[0] ) free( handle->ports[0] );
2448 if ( handle->ports[1] ) free( handle->ports[1] );
2449
2450 delete handle;
2451 stream_.apiHandle = 0;
2452 }
2453
2454 for ( int i=0; i<2; i++ ) {
2455 if ( stream_.userBuffer[i] ) {
2456 free( stream_.userBuffer[i] );
2457 stream_.userBuffer[i] = 0;
2458 }
2459 }
2460
2461 if ( stream_.deviceBuffer ) {
2462 free( stream_.deviceBuffer );
2463 stream_.deviceBuffer = 0;
2464 }
2465
2466 return FAILURE;
2467 }
2468
closeStream(void)2469 void RtApiJack :: closeStream( void )
2470 {
2471 if ( stream_.state == STREAM_CLOSED ) {
2472 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2473 error( RtAudioError::WARNING );
2474 return;
2475 }
2476
2477 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2478 if ( handle ) {
2479
2480 if ( stream_.state == STREAM_RUNNING )
2481 jack_deactivate( handle->client );
2482
2483 jack_client_close( handle->client );
2484 }
2485
2486 if ( handle ) {
2487 if ( handle->ports[0] ) free( handle->ports[0] );
2488 if ( handle->ports[1] ) free( handle->ports[1] );
2489 pthread_cond_destroy( &handle->condition );
2490 delete handle;
2491 stream_.apiHandle = 0;
2492 }
2493
2494 for ( int i=0; i<2; i++ ) {
2495 if ( stream_.userBuffer[i] ) {
2496 free( stream_.userBuffer[i] );
2497 stream_.userBuffer[i] = 0;
2498 }
2499 }
2500
2501 if ( stream_.deviceBuffer ) {
2502 free( stream_.deviceBuffer );
2503 stream_.deviceBuffer = 0;
2504 }
2505
2506 stream_.mode = UNINITIALIZED;
2507 stream_.state = STREAM_CLOSED;
2508 }
2509
// Start the JACK stream: activate the client so the process callback
// begins firing, then (unless autoconnect was disabled via the
// RTAUDIO_JACK_DONT_CONNECT flag) connect our registered ports to the
// device's ports.  Any failure jumps to "unlock" and raises a
// SYSTEM_ERROR; success sets the stream state to STREAM_RUNNING.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    result = 1;  // preset failure in case jack_get_ports() returns NULL
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;  // stays 1 (failure) if the target port slot is missing
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Same procedure for capture: our input ports are connected FROM the
  // device's output (JackPortIsOutput) ports.
  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset drain bookkeeping before declaring the stream running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2587
// Stop the JACK stream.  For output/duplex streams, first wait for the
// process callback to play out (drain) the final buffer, then
// deactivate the client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      // drainCounter == 0 means no drain is in progress yet; a value of
      // 2 tells callbackEvent() to stop invoking the user callback and
      // write zeros, then signal us once the last data has gone out.
      // (abortStream() pre-sets drainCounter to 2, so this wait is
      // skipped there and the stop is immediate.)
      // NOTE(review): pthread_cond_wait() requires stream_.mutex to be
      // locked by the caller — no lock/unlock is visible in this
      // function; confirm the locking protocol against the condition
      // signaler in callbackEvent().
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2609
abortStream(void)2610 void RtApiJack :: abortStream( void )
2611 {
2612 verifyStream();
2613 if ( stream_.state == STREAM_STOPPED ) {
2614 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2615 error( RtAudioError::WARNING );
2616 return;
2617 }
2618
2619 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2620 handle->drainCounter = 2;
2621
2622 stopStream();
2623 }
2624
2625 // This function will be called by a spawned thread when the user
2626 // callback function signals that the stream should be stopped or
2627 // aborted. It is necessary to handle it this way because the
2628 // callbackEvent() function must return before the jack_deactivate()
2629 // function will return.
jackStopStream(void * ptr)2630 static void *jackStopStream( void *ptr )
2631 {
2632 CallbackInfo *info = (CallbackInfo *) ptr;
2633 RtApiJack *object = (RtApiJack *) info->object;
2634
2635 object->stopStream();
2636 pthread_exit( NULL );
2637 }
2638
callbackEvent(unsigned long nframes)2639 bool RtApiJack :: callbackEvent( unsigned long nframes )
2640 {
2641 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2642 if ( stream_.state == STREAM_CLOSED ) {
2643 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2644 error( RtAudioError::WARNING );
2645 return FAILURE;
2646 }
2647 if ( stream_.bufferSize != nframes ) {
2648 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2649 error( RtAudioError::WARNING );
2650 return FAILURE;
2651 }
2652
2653 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2654 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2655
2656 // Check if we were draining the stream and signal is finished.
2657 if ( handle->drainCounter > 3 ) {
2658 ThreadHandle threadId;
2659
2660 stream_.state = STREAM_STOPPING;
2661 if ( handle->internalDrain == true )
2662 pthread_create( &threadId, NULL, jackStopStream, info );
2663 else
2664 pthread_cond_signal( &handle->condition );
2665 return SUCCESS;
2666 }
2667
2668 // Invoke user callback first, to get fresh output data.
2669 if ( handle->drainCounter == 0 ) {
2670 RtAudioCallback callback = (RtAudioCallback) info->callback;
2671 double streamTime = getStreamTime();
2672 RtAudioStreamStatus status = 0;
2673 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2674 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2675 handle->xrun[0] = false;
2676 }
2677 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2678 status |= RTAUDIO_INPUT_OVERFLOW;
2679 handle->xrun[1] = false;
2680 }
2681 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2682 stream_.bufferSize, streamTime, status, info->userData );
2683 if ( cbReturnValue == 2 ) {
2684 stream_.state = STREAM_STOPPING;
2685 handle->drainCounter = 2;
2686 ThreadHandle id;
2687 pthread_create( &id, NULL, jackStopStream, info );
2688 return SUCCESS;
2689 }
2690 else if ( cbReturnValue == 1 ) {
2691 handle->drainCounter = 1;
2692 handle->internalDrain = true;
2693 }
2694 }
2695
2696 jack_default_audio_sample_t *jackbuffer;
2697 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2698 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2699
2700 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2701
2702 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2703 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2704 memset( jackbuffer, 0, bufferBytes );
2705 }
2706
2707 }
2708 else if ( stream_.doConvertBuffer[0] ) {
2709
2710 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2711
2712 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2713 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2714 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2715 }
2716 }
2717 else { // no buffer conversion
2718 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2719 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2720 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2721 }
2722 }
2723 }
2724
2725 // Don't bother draining input
2726 if ( handle->drainCounter ) {
2727 handle->drainCounter++;
2728 goto unlock;
2729 }
2730
2731 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2732
2733 if ( stream_.doConvertBuffer[1] ) {
2734 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2735 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2736 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2737 }
2738 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2739 }
2740 else { // no buffer conversion
2741 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2742 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2743 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2744 }
2745 }
2746 }
2747
2748 unlock:
2749 RtApi::tickStreamTime();
2750 return SUCCESS;
2751 }
2752 //******************** End of __UNIX_JACK__ *********************//
2753 #endif
2754
2755 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2756
2757 // The ASIO API is designed around a callback scheme, so this
2758 // implementation is similar to that used for OS-X CoreAudio and Linux
2759 // Jack. The primary constraint with ASIO is that it only allows
2760 // access to a single driver at a time. Thus, it is not possible to
2761 // have more than one simultaneous RtAudio stream.
2762 //
2763 // This implementation also requires a number of external ASIO files
2764 // and a few global variables. The ASIO callback scheme does not
2765 // allow for the passing of user data, so we must create a global
2766 // pointer to our callbackInfo structure.
2767 //
2768 // On unix systems, we make use of a pthread condition variable.
2769 // Since there is no equivalent in Windows, I hacked something based
2770 // on information found in
2771 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2772
2773 #include "asiosys.h"
2774 #include "asio.h"
2775 #include "iasiothiscallresolver.h"
2776 #include "asiodrivers.h"
2777 #include <cmath>
2778
// File-scope ASIO state.  The ASIO callback scheme carries no user-data
// pointer, so these globals link the driver callbacks back to the single
// active RtApiAsio stream (ASIO allows only one driver at a time).
static AsioDrivers drivers;             // driver enumeration/loading helper
static ASIOCallbacks asioCallbacks;     // callback table handed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;       // filled in by ASIOInit()
static CallbackInfo *asioCallbackInfo;  // stream callback info used by bufferSwitch()
static bool asioXRun;                   // xrun flag — presumably set by the driver
                                        // notification handlers; confirm in asioMessages()
2784
// Per-stream ASIO bookkeeping, stored in stream_.apiHandle.
struct AsioHandle {
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // malloc'ed array, one entry per device channel
  HANDLE condition;       // Win32 event used to signal stop/drain completion.
                          // NOTE: not initialized here — probeDeviceOpen()
                          // creates it via CreateEvent().

  AsioHandle()
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
2794
2795 // Function declarations (definitions at end of section)
2796 static const char* getAsioErrorString( ASIOError result );
2797 static void sampleRateChanged( ASIOSampleRate sRate );
2798 static long asioMessages( long selector, long value, void* message, double* opt );
2799
RtApiAsio()2800 RtApiAsio :: RtApiAsio()
2801 {
2802 // ASIO cannot run on a multi-threaded appartment. You can call
2803 // CoInitialize beforehand, but it must be for appartment threading
2804 // (in which case, CoInitilialize will return S_FALSE here).
2805 coInitialized_ = false;
2806 HRESULT hr = CoInitialize( NULL );
2807 if ( FAILED(hr) ) {
2808 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2809 error( RtAudioError::WARNING );
2810 }
2811 coInitialized_ = true;
2812
2813 drivers.removeCurrentDriver();
2814 driverInfo.asioVersion = 2;
2815
2816 // See note in DirectSound implementation about GetDesktopWindow().
2817 driverInfo.sysRef = GetForegroundWindow();
2818 }
2819
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() with CoUninitialize() when COM was initialized.
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
2825
getDeviceCount(void)2826 unsigned int RtApiAsio :: getDeviceCount( void )
2827 {
2828 return (unsigned int) drivers.asioGetNumDev();
2829 }
2830
// Probe one ASIO device (driver) and return its capabilities.
// Because ASIO permits only a single loaded driver at a time, this
// loads the driver, queries channels / sample rates / data format,
// and unloads it again before returning.  While a stream is open it
// answers from the devices_ cache built by saveDeviceInfo() instead.
// On any failure a warning is raised and info.probed remains false.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    // Driver is loaded at this point, so unload it before bailing out.
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Preferred rate: the highest supported rate not exceeding 48 kHz
      // (falling back to whatever is found first otherwise).
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
    }
  }

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Map the ASIO sample type to the closest RtAudio format; byte order
  // is handled at stream-open time, so MSB and LSB variants map alike.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  // Unload the driver so another device can be probed or opened.
  drivers.removeCurrentDriver();
  return info;
}
2949
bufferSwitch(long index,ASIOBool)2950 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2951 {
2952 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2953 object->callbackEvent( index );
2954 }
2955
saveDeviceInfo(void)2956 void RtApiAsio :: saveDeviceInfo( void )
2957 {
2958 devices_.clear();
2959
2960 unsigned int nDevices = getDeviceCount();
2961 devices_.resize( nDevices );
2962 for ( unsigned int i=0; i<nDevices; i++ )
2963 devices_[i] = getDeviceInfo( i );
2964 }
2965
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2966 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2967 unsigned int firstChannel, unsigned int sampleRate,
2968 RtAudioFormat format, unsigned int *bufferSize,
2969 RtAudio::StreamOptions *options )
2970 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2971
2972 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2973
2974 // For ASIO, a duplex stream MUST use the same driver.
2975 if ( isDuplexInput && stream_.device[0] != device ) {
2976 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2977 return FAILURE;
2978 }
2979
2980 char driverName[32];
2981 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2982 if ( result != ASE_OK ) {
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2984 errorText_ = errorStream_.str();
2985 return FAILURE;
2986 }
2987
2988 // Only load the driver once for duplex stream.
2989 if ( !isDuplexInput ) {
2990 // The getDeviceInfo() function will not work when a stream is open
2991 // because ASIO does not allow multiple devices to run at the same
2992 // time. Thus, we'll probe the system before opening a stream and
2993 // save the results for use by getDeviceInfo().
2994 this->saveDeviceInfo();
2995
2996 if ( !drivers.loadDriver( driverName ) ) {
2997 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2998 errorText_ = errorStream_.str();
2999 return FAILURE;
3000 }
3001
3002 result = ASIOInit( &driverInfo );
3003 if ( result != ASE_OK ) {
3004 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3005 errorText_ = errorStream_.str();
3006 return FAILURE;
3007 }
3008 }
3009
3010 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3011 bool buffersAllocated = false;
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3013 unsigned int nChannels;
3014
3015
3016 // Check the device channel count.
3017 long inputChannels, outputChannels;
3018 result = ASIOGetChannels( &inputChannels, &outputChannels );
3019 if ( result != ASE_OK ) {
3020 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3021 errorText_ = errorStream_.str();
3022 goto error;
3023 }
3024
3025 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3026 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3027 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3028 errorText_ = errorStream_.str();
3029 goto error;
3030 }
3031 stream_.nDeviceChannels[mode] = channels;
3032 stream_.nUserChannels[mode] = channels;
3033 stream_.channelOffset[mode] = firstChannel;
3034
3035 // Verify the sample rate is supported.
3036 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3037 if ( result != ASE_OK ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3039 errorText_ = errorStream_.str();
3040 goto error;
3041 }
3042
3043 // Get the current sample rate
3044 ASIOSampleRate currentRate;
3045 result = ASIOGetSampleRate( ¤tRate );
3046 if ( result != ASE_OK ) {
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3048 errorText_ = errorStream_.str();
3049 goto error;
3050 }
3051
3052 // Set the sample rate only if necessary
3053 if ( currentRate != sampleRate ) {
3054 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3057 errorText_ = errorStream_.str();
3058 goto error;
3059 }
3060 }
3061
3062 // Determine the driver data type.
3063 ASIOChannelInfo channelInfo;
3064 channelInfo.channel = 0;
3065 if ( mode == OUTPUT ) channelInfo.isInput = false;
3066 else channelInfo.isInput = true;
3067 result = ASIOGetChannelInfo( &channelInfo );
3068 if ( result != ASE_OK ) {
3069 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3070 errorText_ = errorStream_.str();
3071 goto error;
3072 }
3073
3074 // Assuming WINDOWS host is always little-endian.
3075 stream_.doByteSwap[mode] = false;
3076 stream_.userFormat = format;
3077 stream_.deviceFormat[mode] = 0;
3078 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3079 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3080 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3081 }
3082 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3083 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3084 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3085 }
3086 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3087 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3088 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3089 }
3090 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3091 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3092 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3093 }
3094 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3095 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3096 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3097 }
3098
3099 if ( stream_.deviceFormat[mode] == 0 ) {
3100 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3101 errorText_ = errorStream_.str();
3102 goto error;
3103 }
3104
3105 // Set the buffer size. For a duplex stream, this will end up
3106 // setting the buffer size based on the input constraints, which
3107 // should be ok.
3108 long minSize, maxSize, preferSize, granularity;
3109 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3110 if ( result != ASE_OK ) {
3111 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3112 errorText_ = errorStream_.str();
3113 goto error;
3114 }
3115
3116 if ( isDuplexInput ) {
3117 // When this is the duplex input (output was opened before), then we have to use the same
3118 // buffersize as the output, because it might use the preferred buffer size, which most
3119 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3120 // So instead of throwing an error, make them equal. The caller uses the reference
3121 // to the "bufferSize" param as usual to set up processing buffers.
3122
3123 *bufferSize = stream_.bufferSize;
3124
3125 } else {
3126 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3127 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3128 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3129 else if ( granularity == -1 ) {
3130 // Make sure bufferSize is a power of two.
3131 int log2_of_min_size = 0;
3132 int log2_of_max_size = 0;
3133
3134 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3135 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3136 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3137 }
3138
3139 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3140 int min_delta_num = log2_of_min_size;
3141
3142 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3143 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3144 if (current_delta < min_delta) {
3145 min_delta = current_delta;
3146 min_delta_num = i;
3147 }
3148 }
3149
3150 *bufferSize = ( (unsigned int)1 << min_delta_num );
3151 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3152 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3153 }
3154 else if ( granularity != 0 ) {
3155 // Set to an even multiple of granularity, rounding up.
3156 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3157 }
3158 }
3159
3160 /*
3161 // we don't use it anymore, see above!
3162 // Just left it here for the case...
3163 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3164 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3165 goto error;
3166 }
3167 */
3168
3169 stream_.bufferSize = *bufferSize;
3170 stream_.nBuffers = 2;
3171
3172 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3173 else stream_.userInterleaved = true;
3174
3175 // ASIO always uses non-interleaved buffers.
3176 stream_.deviceInterleaved[mode] = false;
3177
3178 // Allocate, if necessary, our AsioHandle structure for the stream.
3179 if ( handle == 0 ) {
3180 try {
3181 handle = new AsioHandle;
3182 }
3183 catch ( std::bad_alloc& ) {
3184 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3185 goto error;
3186 }
3187 handle->bufferInfos = 0;
3188
3189 // Create a manual-reset event.
3190 handle->condition = CreateEvent( NULL, // no security
3191 TRUE, // manual-reset
3192 FALSE, // non-signaled initially
3193 NULL ); // unnamed
3194 stream_.apiHandle = (void *) handle;
3195 }
3196
3197 // Create the ASIO internal buffers. Since RtAudio sets up input
3198 // and output separately, we'll have to dispose of previously
3199 // created output buffers for a duplex stream.
3200 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3201 ASIODisposeBuffers();
3202 if ( handle->bufferInfos ) free( handle->bufferInfos );
3203 }
3204
3205 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3206 unsigned int i;
3207 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3208 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3209 if ( handle->bufferInfos == NULL ) {
3210 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3211 errorText_ = errorStream_.str();
3212 goto error;
3213 }
3214
3215 ASIOBufferInfo *infos;
3216 infos = handle->bufferInfos;
3217 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3218 infos->isInput = ASIOFalse;
3219 infos->channelNum = i + stream_.channelOffset[0];
3220 infos->buffers[0] = infos->buffers[1] = 0;
3221 }
3222 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3223 infos->isInput = ASIOTrue;
3224 infos->channelNum = i + stream_.channelOffset[1];
3225 infos->buffers[0] = infos->buffers[1] = 0;
3226 }
3227
3228 // prepare for callbacks
3229 stream_.sampleRate = sampleRate;
3230 stream_.device[mode] = device;
3231 stream_.mode = isDuplexInput ? DUPLEX : mode;
3232
3233 // store this class instance before registering callbacks, that are going to use it
3234 asioCallbackInfo = &stream_.callbackInfo;
3235 stream_.callbackInfo.object = (void *) this;
3236
3237 // Set up the ASIO callback structure and create the ASIO data buffers.
3238 asioCallbacks.bufferSwitch = &bufferSwitch;
3239 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3240 asioCallbacks.asioMessage = &asioMessages;
3241 asioCallbacks.bufferSwitchTimeInfo = NULL;
3242 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3243 if ( result != ASE_OK ) {
3244 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3245 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3246 // In that case, let's be naïve and try that instead.
3247 *bufferSize = preferSize;
3248 stream_.bufferSize = *bufferSize;
3249 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3250 }
3251
3252 if ( result != ASE_OK ) {
3253 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3254 errorText_ = errorStream_.str();
3255 goto error;
3256 }
3257 buffersAllocated = true;
3258 stream_.state = STREAM_STOPPED;
3259
3260 // Set flags for buffer conversion.
3261 stream_.doConvertBuffer[mode] = false;
3262 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3263 stream_.doConvertBuffer[mode] = true;
3264 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3265 stream_.nUserChannels[mode] > 1 )
3266 stream_.doConvertBuffer[mode] = true;
3267
3268 // Allocate necessary internal buffers
3269 unsigned long bufferBytes;
3270 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3271 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3272 if ( stream_.userBuffer[mode] == NULL ) {
3273 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3274 goto error;
3275 }
3276
3277 if ( stream_.doConvertBuffer[mode] ) {
3278
3279 bool makeBuffer = true;
3280 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3281 if ( isDuplexInput && stream_.deviceBuffer ) {
3282 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3283 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3284 }
3285
3286 if ( makeBuffer ) {
3287 bufferBytes *= *bufferSize;
3288 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3289 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3290 if ( stream_.deviceBuffer == NULL ) {
3291 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3292 goto error;
3293 }
3294 }
3295 }
3296
3297 // Determine device latencies
3298 long inputLatency, outputLatency;
3299 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3300 if ( result != ASE_OK ) {
3301 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3302 errorText_ = errorStream_.str();
3303 error( RtAudioError::WARNING); // warn but don't fail
3304 }
3305 else {
3306 stream_.latency[0] = outputLatency;
3307 stream_.latency[1] = inputLatency;
3308 }
3309
3310 // Setup the buffer conversion information structure. We don't use
3311 // buffers to do channel offsets, so we override that parameter
3312 // here.
3313 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3314
3315 return SUCCESS;
3316
3317 error:
3318 if ( !isDuplexInput ) {
3319 // the cleanup for error in the duplex input, is done by RtApi::openStream
3320 // So we clean up for single channel only
3321
3322 if ( buffersAllocated )
3323 ASIODisposeBuffers();
3324
3325 drivers.removeCurrentDriver();
3326
3327 if ( handle ) {
3328 CloseHandle( handle->condition );
3329 if ( handle->bufferInfos )
3330 free( handle->bufferInfos );
3331
3332 delete handle;
3333 stream_.apiHandle = 0;
3334 }
3335
3336
3337 if ( stream_.userBuffer[mode] ) {
3338 free( stream_.userBuffer[mode] );
3339 stream_.userBuffer[mode] = 0;
3340 }
3341
3342 if ( stream_.deviceBuffer ) {
3343 free( stream_.deviceBuffer );
3344 stream_.deviceBuffer = 0;
3345 }
3346 }
3347
3348 return FAILURE;
3349 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3350
closeStream()3351 void RtApiAsio :: closeStream()
3352 {
3353 if ( stream_.state == STREAM_CLOSED ) {
3354 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3355 error( RtAudioError::WARNING );
3356 return;
3357 }
3358
3359 if ( stream_.state == STREAM_RUNNING ) {
3360 stream_.state = STREAM_STOPPED;
3361 ASIOStop();
3362 }
3363 ASIODisposeBuffers();
3364 drivers.removeCurrentDriver();
3365
3366 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3367 if ( handle ) {
3368 CloseHandle( handle->condition );
3369 if ( handle->bufferInfos )
3370 free( handle->bufferInfos );
3371 delete handle;
3372 stream_.apiHandle = 0;
3373 }
3374
3375 for ( int i=0; i<2; i++ ) {
3376 if ( stream_.userBuffer[i] ) {
3377 free( stream_.userBuffer[i] );
3378 stream_.userBuffer[i] = 0;
3379 }
3380 }
3381
3382 if ( stream_.deviceBuffer ) {
3383 free( stream_.deviceBuffer );
3384 stream_.deviceBuffer = 0;
3385 }
3386
3387 stream_.mode = UNINITIALIZED;
3388 stream_.state = STREAM_CLOSED;
3389 }
3390
// File-scope flag shared with the ASIO start/stop machinery.  It is cleared
// each time startStream() completes; its readers are outside this chunk, so
// presumably it guards against issuing duplicate stop requests from the
// callback path -- TODO confirm against the stop/callback logic elsewhere.
bool stopThreadCalled = false;
3392
startStream()3393 void RtApiAsio :: startStream()
3394 {
3395 verifyStream();
3396 if ( stream_.state == STREAM_RUNNING ) {
3397 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3398 error( RtAudioError::WARNING );
3399 return;
3400 }
3401
3402 #if defined( HAVE_GETTIMEOFDAY )
3403 gettimeofday( &stream_.lastTickTimestamp, NULL );
3404 #endif
3405
3406 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3407 ASIOError result = ASIOStart();
3408 if ( result != ASE_OK ) {
3409 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3410 errorText_ = errorStream_.str();
3411 goto unlock;
3412 }
3413
3414 handle->drainCounter = 0;
3415 handle->internalDrain = false;
3416 ResetEvent( handle->condition );
3417 stream_.state = STREAM_RUNNING;
3418 asioXRun = false;
3419
3420 unlock:
3421 stopThreadCalled = false;
3422
3423 if ( result == ASE_OK ) return;
3424 error( RtAudioError::SYSTEM_ERROR );
3425 }
3426
stopStream()3427 void RtApiAsio :: stopStream()
3428 {
3429 verifyStream();
3430 if ( stream_.state == STREAM_STOPPED ) {
3431 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3432 error( RtAudioError::WARNING );
3433 return;
3434 }
3435
3436 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3437 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3438 if ( handle->drainCounter == 0 ) {
3439 handle->drainCounter = 2;
3440 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3441 }
3442 }
3443
3444 stream_.state = STREAM_STOPPED;
3445
3446 ASIOError result = ASIOStop();
3447 if ( result != ASE_OK ) {
3448 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3449 errorText_ = errorStream_.str();
3450 }
3451
3452 if ( result == ASE_OK ) return;
3453 error( RtAudioError::SYSTEM_ERROR );
3454 }
3455
abortStream()3456 void RtApiAsio :: abortStream()
3457 {
3458 verifyStream();
3459 if ( stream_.state == STREAM_STOPPED ) {
3460 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3461 error( RtAudioError::WARNING );
3462 return;
3463 }
3464
3465 // The following lines were commented-out because some behavior was
3466 // noted where the device buffers need to be zeroed to avoid
3467 // continuing sound, even when the device buffers are completely
3468 // disposed. So now, calling abort is the same as calling stop.
3469 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3470 // handle->drainCounter = 2;
3471 stopStream();
3472 }
3473
3474 // This function will be called by a spawned thread when the user
3475 // callback function signals that the stream should be stopped or
3476 // aborted. It is necessary to handle it this way because the
3477 // callbackEvent() function must return before the ASIOStop()
3478 // function will return.
asioStopStream(void * ptr)3479 static unsigned __stdcall asioStopStream( void *ptr )
3480 {
3481 CallbackInfo *info = (CallbackInfo *) ptr;
3482 RtApiAsio *object = (RtApiAsio *) info->object;
3483
3484 object->stopStream();
3485 _endthreadex( 0 );
3486 return 0;
3487 }
3488
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // Per-period processing invoked from the driver's bufferSwitch callback.
  // bufferIndex selects which half of each double buffer in
  // handle->bufferInfos belongs to us this period.  Returns SUCCESS/FAILURE.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter is bumped once per period below, so > 3 means at least two
  // periods of zeros have gone out since the drain started.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it up.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // We cannot call stopStream() from this callback: ASIOStop() will not
      // return until the callback does (see asioStopStream above).
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged by asioMessages() as underflow/overflow
    // depending on the stream direction, then clear it.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // User requested an immediate abort: begin stopping without waiting
      // for a full drain cycle.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // User requested a drain-then-stop: the stream stops itself once the
      // pending output has been flushed (internalDrain path above).
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert (and optionally byte-swap) the interleaved user buffer into
      // the device layout, then scatter one channel per ASIO buffer.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy straight from the user buffer, channel
      // by channel (ASIO buffers are always non-interleaved).
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Gather each ASIO input channel into the (channel-blocked) user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3642
sampleRateChanged(ASIOSampleRate sRate)3643 static void sampleRateChanged( ASIOSampleRate sRate )
3644 {
3645 // The ASIO documentation says that this usually only happens during
3646 // external sync. Audio processing is not stopped by the driver,
3647 // actual sample rate might not have even changed, maybe only the
3648 // sample rate status of an AES/EBU or S/PDIF digital input at the
3649 // audio device.
3650
3651 RtApi *object = (RtApi *) asioCallbackInfo->object;
3652 try {
3653 object->stopStream();
3654 }
3655 catch ( RtAudioError &exception ) {
3656 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3657 return;
3658 }
3659
3660 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3661 }
3662
asioMessages(long selector,long value,void *,double *)3663 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3664 {
3665 long ret = 0;
3666
3667 switch( selector ) {
3668 case kAsioSelectorSupported:
3669 if ( value == kAsioResetRequest
3670 || value == kAsioEngineVersion
3671 || value == kAsioResyncRequest
3672 || value == kAsioLatenciesChanged
3673 // The following three were added for ASIO 2.0, you don't
3674 // necessarily have to support them.
3675 || value == kAsioSupportsTimeInfo
3676 || value == kAsioSupportsTimeCode
3677 || value == kAsioSupportsInputMonitor)
3678 ret = 1L;
3679 break;
3680 case kAsioResetRequest:
3681 // Defer the task and perform the reset of the driver during the
3682 // next "safe" situation. You cannot reset the driver right now,
3683 // as this code is called from the driver. Reset the driver is
3684 // done by completely destruct is. I.e. ASIOStop(),
3685 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3686 // driver again.
3687 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3688 ret = 1L;
3689 break;
3690 case kAsioResyncRequest:
3691 // This informs the application that the driver encountered some
3692 // non-fatal data loss. It is used for synchronization purposes
3693 // of different media. Added mainly to work around the Win16Mutex
3694 // problems in Windows 95/98 with the Windows Multimedia system,
3695 // which could lose data because the Mutex was held too long by
3696 // another thread. However a driver can issue it in other
3697 // situations, too.
3698 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3699 asioXRun = true;
3700 ret = 1L;
3701 break;
3702 case kAsioLatenciesChanged:
3703 // This will inform the host application that the drivers were
3704 // latencies changed. Beware, it this does not mean that the
3705 // buffer sizes have changed! You might need to update internal
3706 // delay data.
3707 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3708 ret = 1L;
3709 break;
3710 case kAsioEngineVersion:
3711 // Return the supported ASIO version of the host application. If
3712 // a host application does not implement this selector, ASIO 1.0
3713 // is assumed by the driver.
3714 ret = 2L;
3715 break;
3716 case kAsioSupportsTimeInfo:
3717 // Informs the driver whether the
3718 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3719 // For compatibility with ASIO 1.0 drivers the host application
3720 // should always support the "old" bufferSwitch method, too.
3721 ret = 0;
3722 break;
3723 case kAsioSupportsTimeCode:
3724 // Informs the driver whether application is interested in time
3725 // code info. If an application does not need to know about time
3726 // code, the driver has less work to do.
3727 ret = 0;
3728 break;
3729 }
3730 return ret;
3731 }
3732
getAsioErrorString(ASIOError result)3733 static const char* getAsioErrorString( ASIOError result )
3734 {
3735 struct Messages
3736 {
3737 ASIOError value;
3738 const char*message;
3739 };
3740
3741 static const Messages m[] =
3742 {
3743 { ASE_NotPresent, "Hardware input or output is not present or available." },
3744 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3745 { ASE_InvalidParameter, "Invalid input parameter." },
3746 { ASE_InvalidMode, "Invalid mode." },
3747 { ASE_SPNotAdvancing, "Sample position not advancing." },
3748 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3749 { ASE_NoMemory, "Not enough memory to complete the request." }
3750 };
3751
3752 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3753 if ( m[i].value == result ) return m[i].message;
3754
3755 return "Unknown error.";
3756 }
3757
3758 //******************** End of __WINDOWS_ASIO__ *********************//
3759 #endif
3760
3761
3762 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3763
3764 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3765 // - Introduces support for the Windows WASAPI API
3766 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3767 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3768 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3769
3770 #ifndef INITGUID
3771 #define INITGUID
3772 #endif
3773
3774 #include <mfapi.h>
3775 #include <mferror.h>
3776 #include <mfplay.h>
3777 #include <mftransform.h>
3778 #include <wmcodecdsp.h>
3779
3780 #include <audioclient.h>
3781 #include <avrt.h>
3782 #include <mmdeviceapi.h>
3783 #include <functiondiscoverykeys_devpkey.h>
3784
3785 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3786 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3787 #endif
3788
3789 #ifndef MFSTARTUP_NOSOCKET
3790 #define MFSTARTUP_NOSOCKET 0x1
3791 #endif
3792
3793 #ifdef _MSC_VER
3794 #pragma comment( lib, "ksuser" )
3795 #pragma comment( lib, "mfplat.lib" )
3796 #pragma comment( lib, "mfuuid.lib" )
3797 #pragma comment( lib, "wmcodecdspuuid" )
3798 #endif
3799
3800 //=============================================================================
3801
// Release a COM interface pointer and reset it to NULL; a no-op when the
// pointer is already NULL.  The do { } while ( 0 ) wrapper makes the macro
// expand to a single statement, so it is safe inside unbraced if/else
// branches (the bare-if form would mis-bind a following "else" and the
// caller's trailing semicolon would terminate the if early).
#define SAFE_RELEASE( objectPtr )\
do\
{\
  if ( objectPtr )\
  {\
    objectPtr->Release();\
    objectPtr = NULL;\
  }\
} while ( 0 )
3808
// Function-pointer signature matching AvSetMmThreadCharacteristicsW
// (avrt.dll) -- presumably used to resolve the symbol at runtime so the
// library still loads on systems lacking the Avrt API; TODO confirm against
// the code that performs the lookup (outside this chunk).
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3810
3811 //-----------------------------------------------------------------------------
3812
3813 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3814 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3815 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3816 // provide intermediate storage for read / write synchronization.
3817 class WasapiBuffer
3818 {
3819 public:
WasapiBuffer()3820 WasapiBuffer()
3821 : buffer_( NULL ),
3822 bufferSize_( 0 ),
3823 inIndex_( 0 ),
3824 outIndex_( 0 ) {}
3825
~WasapiBuffer()3826 ~WasapiBuffer() {
3827 free( buffer_ );
3828 }
3829
3830 // sets the length of the internal ring buffer
setBufferSize(unsigned int bufferSize,unsigned int formatBytes)3831 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3832 free( buffer_ );
3833
3834 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3835
3836 bufferSize_ = bufferSize;
3837 inIndex_ = 0;
3838 outIndex_ = 0;
3839 }
3840
3841 // attempt to push a buffer into the ring buffer at the current "in" index
pushBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3842 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3843 {
3844 if ( !buffer || // incoming buffer is NULL
3845 bufferSize == 0 || // incoming buffer has no data
3846 bufferSize > bufferSize_ ) // incoming buffer too large
3847 {
3848 return false;
3849 }
3850
3851 unsigned int relOutIndex = outIndex_;
3852 unsigned int inIndexEnd = inIndex_ + bufferSize;
3853 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3854 relOutIndex += bufferSize_;
3855 }
3856
3857 // the "IN" index CAN BEGIN at the "OUT" index
3858 // the "IN" index CANNOT END at the "OUT" index
3859 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3860 return false; // not enough space between "in" index and "out" index
3861 }
3862
3863 // copy buffer from external to internal
3864 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3865 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3866 int fromInSize = bufferSize - fromZeroSize;
3867
3868 switch( format )
3869 {
3870 case RTAUDIO_SINT8:
3871 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3872 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3873 break;
3874 case RTAUDIO_SINT16:
3875 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3876 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3877 break;
3878 case RTAUDIO_SINT24:
3879 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3880 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3881 break;
3882 case RTAUDIO_SINT32:
3883 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3884 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3885 break;
3886 case RTAUDIO_FLOAT32:
3887 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3888 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3889 break;
3890 case RTAUDIO_FLOAT64:
3891 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3892 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3893 break;
3894 }
3895
3896 // update "in" index
3897 inIndex_ += bufferSize;
3898 inIndex_ %= bufferSize_;
3899
3900 return true;
3901 }
3902
3903 // attempt to pull a buffer from the ring buffer from the current "out" index
pullBuffer(char * buffer,unsigned int bufferSize,RtAudioFormat format)3904 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3905 {
3906 if ( !buffer || // incoming buffer is NULL
3907 bufferSize == 0 || // incoming buffer has no data
3908 bufferSize > bufferSize_ ) // incoming buffer too large
3909 {
3910 return false;
3911 }
3912
3913 unsigned int relInIndex = inIndex_;
3914 unsigned int outIndexEnd = outIndex_ + bufferSize;
3915 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3916 relInIndex += bufferSize_;
3917 }
3918
3919 // the "OUT" index CANNOT BEGIN at the "IN" index
3920 // the "OUT" index CAN END at the "IN" index
3921 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3922 return false; // not enough space between "out" index and "in" index
3923 }
3924
3925 // copy buffer from internal to external
3926 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3927 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3928 int fromOutSize = bufferSize - fromZeroSize;
3929
3930 switch( format )
3931 {
3932 case RTAUDIO_SINT8:
3933 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3934 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3935 break;
3936 case RTAUDIO_SINT16:
3937 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3938 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3939 break;
3940 case RTAUDIO_SINT24:
3941 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3942 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3943 break;
3944 case RTAUDIO_SINT32:
3945 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3946 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3947 break;
3948 case RTAUDIO_FLOAT32:
3949 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3950 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3951 break;
3952 case RTAUDIO_FLOAT64:
3953 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3954 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3955 break;
3956 }
3957
3958 // update "out" index
3959 outIndex_ += bufferSize;
3960 outIndex_ %= bufferSize_;
3961
3962 return true;
3963 }
3964
3965 private:
3966 char* buffer_;
3967 unsigned int bufferSize_;
3968 unsigned int inIndex_;
3969 unsigned int outIndex_;
3970 };
3971
3972 //-----------------------------------------------------------------------------
3973
3974 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3975 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3976 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
class WasapiResampler
{
public:
  // Wraps the Media Foundation audio resampler MFT (CResamplerMediaObject)
  // to convert interleaved audio between inSampleRate and outSampleRate.
  //   isFloat       : true for float samples, false for integer PCM
  //   bitsPerSample : sample width in bits
  //   channelCount  : number of interleaved channels
  // NOTE(review): HRESULTs of the COM/MF calls below are not checked; if
  // CoCreateInstance fails, _transformUnk stays NULL and the subsequent
  // QueryInterface dereferences it -- presumably construction only happens
  // after WASAPI initialization has succeeded.  TODO confirm at call sites.
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )

#ifdef __IWMResamplerProps_FWD_DEFINED__
    , _resamplerProps( NULL )
#endif
  {
    // 1. Initialization

    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object

    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

#ifdef __IWMResamplerProps_FWD_DEFINED__
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
    _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
#endif

    // 3. Specify input / output format
    // Build one media type describing the input, clone it for the output,
    // then override only the rate-dependent attributes on the output copy.

    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
  }

  ~WasapiResampler()
  {
    // 8. Send stream stop messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // 9. Cleanup

    MFShutdown();

    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

#ifdef __IWMResamplerProps_FWD_DEFINED__
    SAFE_RELEASE( _resamplerProps );
#endif
  }

  // Resample inSampleCount frames from inBuffer into outBuffer; the number
  // of frames actually produced is returned through outSampleCount (0 when
  // the transform needs more input before it can emit anything).
  // NOTE(review): outBuffer is assumed large enough for roughly
  // ceil( inSampleCount * ratio ) + 1 frames (see outputBufferSize below);
  // TODO confirm the call sites allocate accordingly.
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
  {
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    if ( _sampleRatio == 1 )
    {
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;
      return;
    }

    // One extra frame of headroom beyond the scaled input size.
    unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data

    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler

    _transform->ProcessInput( 0, rInSample, 0 );

    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion

    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
    DWORD rStatus;
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data

    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler

    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
    {
      outSampleCount = 0;
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );
      return;
    }

    // 7.3 Write output data to outBuffer

    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes );

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  }

private:
  unsigned int _bytesPerSample;   // bytes per single-channel sample
  unsigned int _channelCount;     // interleaved channels per frame
  float _sampleRatio;             // outSampleRate / inSampleRate

  IUnknown* _transformUnk;
  IMFTransform* _transform;
  IMFMediaType* _mediaType;
  IMFMediaType* _inputMediaType;
  IMFMediaType* _outputMediaType;

#ifdef __IWMResamplerProps_FWD_DEFINED__
  IWMResamplerProps* _resamplerProps;
#endif
};
4163
4164 //-----------------------------------------------------------------------------
4165
4166 // A structure to hold various information related to the WASAPI implementation.
4167 struct WasapiHandle
4168 {
4169 IAudioClient* captureAudioClient;
4170 IAudioClient* renderAudioClient;
4171 IAudioCaptureClient* captureClient;
4172 IAudioRenderClient* renderClient;
4173 HANDLE captureEvent;
4174 HANDLE renderEvent;
4175
WasapiHandleWasapiHandle4176 WasapiHandle()
4177 : captureAudioClient( NULL ),
4178 renderAudioClient( NULL ),
4179 captureClient( NULL ),
4180 renderClient( NULL ),
4181 captureEvent( NULL ),
4182 renderEvent( NULL ) {}
4183 };
4184
4185 //=============================================================================
4186
RtApiWasapi()4187 RtApiWasapi::RtApiWasapi()
4188 : coInitialized_( false ), deviceEnumerator_( NULL )
4189 {
4190 // WASAPI can run either apartment or multi-threaded
4191 HRESULT hr = CoInitialize( NULL );
4192 if ( !FAILED( hr ) )
4193 coInitialized_ = true;
4194
4195 // Instantiate device enumerator
4196 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4197 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4198 ( void** ) &deviceEnumerator_ );
4199
4200 // If this runs on an old Windows, it will fail. Ignore and proceed.
4201 if ( FAILED( hr ) )
4202 deviceEnumerator_ = NULL;
4203 }
4204
4205 //-----------------------------------------------------------------------------
4206
~RtApiWasapi()4207 RtApiWasapi::~RtApiWasapi()
4208 {
4209 if ( stream_.state != STREAM_CLOSED )
4210 closeStream();
4211
4212 SAFE_RELEASE( deviceEnumerator_ );
4213
4214 // If this object previously called CoInitialize()
4215 if ( coInitialized_ )
4216 CoUninitialize();
4217 }
4218
4219 //=============================================================================
4220
getDeviceCount(void)4221 unsigned int RtApiWasapi::getDeviceCount( void )
4222 {
4223 unsigned int captureDeviceCount = 0;
4224 unsigned int renderDeviceCount = 0;
4225
4226 IMMDeviceCollection* captureDevices = NULL;
4227 IMMDeviceCollection* renderDevices = NULL;
4228
4229 if ( !deviceEnumerator_ )
4230 return 0;
4231
4232 // Count capture devices
4233 errorText_.clear();
4234 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4235 if ( FAILED( hr ) ) {
4236 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4237 goto Exit;
4238 }
4239
4240 hr = captureDevices->GetCount( &captureDeviceCount );
4241 if ( FAILED( hr ) ) {
4242 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4243 goto Exit;
4244 }
4245
4246 // Count render devices
4247 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4248 if ( FAILED( hr ) ) {
4249 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4250 goto Exit;
4251 }
4252
4253 hr = renderDevices->GetCount( &renderDeviceCount );
4254 if ( FAILED( hr ) ) {
4255 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4256 goto Exit;
4257 }
4258
4259 Exit:
4260 // release all references
4261 SAFE_RELEASE( captureDevices );
4262 SAFE_RELEASE( renderDevices );
4263
4264 if ( errorText_.empty() )
4265 return captureDeviceCount + renderDeviceCount;
4266
4267 error( RtAudioError::DRIVER_ERROR );
4268 return 0;
4269 }
4270
4271 //-----------------------------------------------------------------------------
4272
getDeviceInfo(unsigned int device)4273 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4274 {
4275 RtAudio::DeviceInfo info;
4276 unsigned int captureDeviceCount = 0;
4277 unsigned int renderDeviceCount = 0;
4278 std::string defaultDeviceName;
4279 bool isCaptureDevice = false;
4280
4281 PROPVARIANT deviceNameProp;
4282 PROPVARIANT defaultDeviceNameProp;
4283
4284 IMMDeviceCollection* captureDevices = NULL;
4285 IMMDeviceCollection* renderDevices = NULL;
4286 IMMDevice* devicePtr = NULL;
4287 IMMDevice* defaultDevicePtr = NULL;
4288 IAudioClient* audioClient = NULL;
4289 IPropertyStore* devicePropStore = NULL;
4290 IPropertyStore* defaultDevicePropStore = NULL;
4291
4292 WAVEFORMATEX* deviceFormat = NULL;
4293 WAVEFORMATEX* closestMatchFormat = NULL;
4294
4295 // probed
4296 info.probed = false;
4297
4298 // Count capture devices
4299 errorText_.clear();
4300 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4301 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4302 if ( FAILED( hr ) ) {
4303 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4304 goto Exit;
4305 }
4306
4307 hr = captureDevices->GetCount( &captureDeviceCount );
4308 if ( FAILED( hr ) ) {
4309 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4310 goto Exit;
4311 }
4312
4313 // Count render devices
4314 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4315 if ( FAILED( hr ) ) {
4316 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4317 goto Exit;
4318 }
4319
4320 hr = renderDevices->GetCount( &renderDeviceCount );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4323 goto Exit;
4324 }
4325
4326 // validate device index
4327 if ( device >= captureDeviceCount + renderDeviceCount ) {
4328 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4329 errorType = RtAudioError::INVALID_USE;
4330 goto Exit;
4331 }
4332
4333 // determine whether index falls within capture or render devices
4334 if ( device >= renderDeviceCount ) {
4335 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4336 if ( FAILED( hr ) ) {
4337 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4338 goto Exit;
4339 }
4340 isCaptureDevice = true;
4341 }
4342 else {
4343 hr = renderDevices->Item( device, &devicePtr );
4344 if ( FAILED( hr ) ) {
4345 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4346 goto Exit;
4347 }
4348 isCaptureDevice = false;
4349 }
4350
4351 // get default device name
4352 if ( isCaptureDevice ) {
4353 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4354 if ( FAILED( hr ) ) {
4355 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4356 goto Exit;
4357 }
4358 }
4359 else {
4360 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4361 if ( FAILED( hr ) ) {
4362 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4363 goto Exit;
4364 }
4365 }
4366
4367 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4368 if ( FAILED( hr ) ) {
4369 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4370 goto Exit;
4371 }
4372 PropVariantInit( &defaultDeviceNameProp );
4373
4374 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4375 if ( FAILED( hr ) ) {
4376 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4377 goto Exit;
4378 }
4379
4380 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4381
4382 // name
4383 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4384 if ( FAILED( hr ) ) {
4385 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4386 goto Exit;
4387 }
4388
4389 PropVariantInit( &deviceNameProp );
4390
4391 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4392 if ( FAILED( hr ) ) {
4393 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4394 goto Exit;
4395 }
4396
4397 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4398
4399 // is default
4400 if ( isCaptureDevice ) {
4401 info.isDefaultInput = info.name == defaultDeviceName;
4402 info.isDefaultOutput = false;
4403 }
4404 else {
4405 info.isDefaultInput = false;
4406 info.isDefaultOutput = info.name == defaultDeviceName;
4407 }
4408
4409 // channel count
4410 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4411 if ( FAILED( hr ) ) {
4412 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4413 goto Exit;
4414 }
4415
4416 hr = audioClient->GetMixFormat( &deviceFormat );
4417 if ( FAILED( hr ) ) {
4418 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4419 goto Exit;
4420 }
4421
4422 if ( isCaptureDevice ) {
4423 info.inputChannels = deviceFormat->nChannels;
4424 info.outputChannels = 0;
4425 info.duplexChannels = 0;
4426 }
4427 else {
4428 info.inputChannels = 0;
4429 info.outputChannels = deviceFormat->nChannels;
4430 info.duplexChannels = 0;
4431 }
4432
4433 // sample rates
4434 info.sampleRates.clear();
4435
4436 // allow support for all sample rates as we have a built-in sample rate converter
4437 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4438 info.sampleRates.push_back( SAMPLE_RATES[i] );
4439 }
4440 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4441
4442 // native format
4443 info.nativeFormats = 0;
4444
4445 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4446 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4447 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4448 {
4449 if ( deviceFormat->wBitsPerSample == 32 ) {
4450 info.nativeFormats |= RTAUDIO_FLOAT32;
4451 }
4452 else if ( deviceFormat->wBitsPerSample == 64 ) {
4453 info.nativeFormats |= RTAUDIO_FLOAT64;
4454 }
4455 }
4456 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4457 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4458 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4459 {
4460 if ( deviceFormat->wBitsPerSample == 8 ) {
4461 info.nativeFormats |= RTAUDIO_SINT8;
4462 }
4463 else if ( deviceFormat->wBitsPerSample == 16 ) {
4464 info.nativeFormats |= RTAUDIO_SINT16;
4465 }
4466 else if ( deviceFormat->wBitsPerSample == 24 ) {
4467 info.nativeFormats |= RTAUDIO_SINT24;
4468 }
4469 else if ( deviceFormat->wBitsPerSample == 32 ) {
4470 info.nativeFormats |= RTAUDIO_SINT32;
4471 }
4472 }
4473
4474 // probed
4475 info.probed = true;
4476
4477 Exit:
4478 // release all references
4479 PropVariantClear( &deviceNameProp );
4480 PropVariantClear( &defaultDeviceNameProp );
4481
4482 SAFE_RELEASE( captureDevices );
4483 SAFE_RELEASE( renderDevices );
4484 SAFE_RELEASE( devicePtr );
4485 SAFE_RELEASE( defaultDevicePtr );
4486 SAFE_RELEASE( audioClient );
4487 SAFE_RELEASE( devicePropStore );
4488 SAFE_RELEASE( defaultDevicePropStore );
4489
4490 CoTaskMemFree( deviceFormat );
4491 CoTaskMemFree( closestMatchFormat );
4492
4493 if ( !errorText_.empty() )
4494 error( errorType );
4495 return info;
4496 }
4497
4498 //-----------------------------------------------------------------------------
4499
getDefaultOutputDevice(void)4500 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4501 {
4502 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4503 if ( getDeviceInfo( i ).isDefaultOutput ) {
4504 return i;
4505 }
4506 }
4507
4508 return 0;
4509 }
4510
4511 //-----------------------------------------------------------------------------
4512
getDefaultInputDevice(void)4513 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4514 {
4515 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4516 if ( getDeviceInfo( i ).isDefaultInput ) {
4517 return i;
4518 }
4519 }
4520
4521 return 0;
4522 }
4523
4524 //-----------------------------------------------------------------------------
4525
closeStream(void)4526 void RtApiWasapi::closeStream( void )
4527 {
4528 if ( stream_.state == STREAM_CLOSED ) {
4529 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4530 error( RtAudioError::WARNING );
4531 return;
4532 }
4533
4534 if ( stream_.state != STREAM_STOPPED )
4535 stopStream();
4536
4537 // clean up stream memory
4538 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4539 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4540
4541 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4542 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4543
4544 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4545 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4546
4547 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4548 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4549
4550 delete ( WasapiHandle* ) stream_.apiHandle;
4551 stream_.apiHandle = NULL;
4552
4553 for ( int i = 0; i < 2; i++ ) {
4554 if ( stream_.userBuffer[i] ) {
4555 free( stream_.userBuffer[i] );
4556 stream_.userBuffer[i] = 0;
4557 }
4558 }
4559
4560 if ( stream_.deviceBuffer ) {
4561 free( stream_.deviceBuffer );
4562 stream_.deviceBuffer = 0;
4563 }
4564
4565 // update stream state
4566 stream_.state = STREAM_CLOSED;
4567 }
4568
4569 //-----------------------------------------------------------------------------
4570
startStream(void)4571 void RtApiWasapi::startStream( void )
4572 {
4573 verifyStream();
4574
4575 if ( stream_.state == STREAM_RUNNING ) {
4576 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4577 error( RtAudioError::WARNING );
4578 return;
4579 }
4580
4581 #if defined( HAVE_GETTIMEOFDAY )
4582 gettimeofday( &stream_.lastTickTimestamp, NULL );
4583 #endif
4584
4585 // update stream state
4586 stream_.state = STREAM_RUNNING;
4587
4588 // create WASAPI stream thread
4589 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4590
4591 if ( !stream_.callbackInfo.thread ) {
4592 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4593 error( RtAudioError::THREAD_ERROR );
4594 }
4595 else {
4596 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4597 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4598 }
4599 }
4600
4601 //-----------------------------------------------------------------------------
4602
stopStream(void)4603 void RtApiWasapi::stopStream( void )
4604 {
4605 verifyStream();
4606
4607 if ( stream_.state == STREAM_STOPPED ) {
4608 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4609 error( RtAudioError::WARNING );
4610 return;
4611 }
4612
4613 // inform stream thread by setting stream state to STREAM_STOPPING
4614 stream_.state = STREAM_STOPPING;
4615
4616 // wait until stream thread is stopped
4617 while( stream_.state != STREAM_STOPPED ) {
4618 Sleep( 1 );
4619 }
4620
4621 // Wait for the last buffer to play before stopping.
4622 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4623
4624 // close thread handle
4625 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4626 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4627 error( RtAudioError::THREAD_ERROR );
4628 return;
4629 }
4630
4631 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4632 }
4633
4634 //-----------------------------------------------------------------------------
4635
abortStream(void)4636 void RtApiWasapi::abortStream( void )
4637 {
4638 verifyStream();
4639
4640 if ( stream_.state == STREAM_STOPPED ) {
4641 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4642 error( RtAudioError::WARNING );
4643 return;
4644 }
4645
4646 // inform stream thread by setting stream state to STREAM_STOPPING
4647 stream_.state = STREAM_STOPPING;
4648
4649 // wait until stream thread is stopped
4650 while ( stream_.state != STREAM_STOPPED ) {
4651 Sleep( 1 );
4652 }
4653
4654 // close thread handle
4655 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4656 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4657 error( RtAudioError::THREAD_ERROR );
4658 return;
4659 }
4660
4661 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4662 }
4663
4664 //-----------------------------------------------------------------------------
4665
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4666 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4667 unsigned int firstChannel, unsigned int sampleRate,
4668 RtAudioFormat format, unsigned int* bufferSize,
4669 RtAudio::StreamOptions* options )
4670 {
4671 bool methodResult = FAILURE;
4672 unsigned int captureDeviceCount = 0;
4673 unsigned int renderDeviceCount = 0;
4674
4675 IMMDeviceCollection* captureDevices = NULL;
4676 IMMDeviceCollection* renderDevices = NULL;
4677 IMMDevice* devicePtr = NULL;
4678 WAVEFORMATEX* deviceFormat = NULL;
4679 unsigned int bufferBytes;
4680 stream_.state = STREAM_STOPPED;
4681
4682 // create API Handle if not already created
4683 if ( !stream_.apiHandle )
4684 stream_.apiHandle = ( void* ) new WasapiHandle();
4685
4686 // Count capture devices
4687 errorText_.clear();
4688 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4689 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4690 if ( FAILED( hr ) ) {
4691 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4692 goto Exit;
4693 }
4694
4695 hr = captureDevices->GetCount( &captureDeviceCount );
4696 if ( FAILED( hr ) ) {
4697 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4698 goto Exit;
4699 }
4700
4701 // Count render devices
4702 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4703 if ( FAILED( hr ) ) {
4704 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4705 goto Exit;
4706 }
4707
4708 hr = renderDevices->GetCount( &renderDeviceCount );
4709 if ( FAILED( hr ) ) {
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4711 goto Exit;
4712 }
4713
4714 // validate device index
4715 if ( device >= captureDeviceCount + renderDeviceCount ) {
4716 errorType = RtAudioError::INVALID_USE;
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4718 goto Exit;
4719 }
4720
4721 // if device index falls within capture devices
4722 if ( device >= renderDeviceCount ) {
4723 if ( mode != INPUT ) {
4724 errorType = RtAudioError::INVALID_USE;
4725 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4726 goto Exit;
4727 }
4728
4729 // retrieve captureAudioClient from devicePtr
4730 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4731
4732 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4733 if ( FAILED( hr ) ) {
4734 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4735 goto Exit;
4736 }
4737
4738 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4739 NULL, ( void** ) &captureAudioClient );
4740 if ( FAILED( hr ) ) {
4741 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4742 goto Exit;
4743 }
4744
4745 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4746 if ( FAILED( hr ) ) {
4747 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4748 goto Exit;
4749 }
4750
4751 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4752 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4753 }
4754
4755 // if device index falls within render devices and is configured for loopback
4756 if ( device < renderDeviceCount && mode == INPUT )
4757 {
4758 // if renderAudioClient is not initialised, initialise it now
4759 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4760 if ( !renderAudioClient )
4761 {
4762 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4763 }
4764
4765 // retrieve captureAudioClient from devicePtr
4766 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4767
4768 hr = renderDevices->Item( device, &devicePtr );
4769 if ( FAILED( hr ) ) {
4770 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4771 goto Exit;
4772 }
4773
4774 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4775 NULL, ( void** ) &captureAudioClient );
4776 if ( FAILED( hr ) ) {
4777 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4778 goto Exit;
4779 }
4780
4781 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4784 goto Exit;
4785 }
4786
4787 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4788 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4789 }
4790
4791 // if device index falls within render devices and is configured for output
4792 if ( device < renderDeviceCount && mode == OUTPUT )
4793 {
4794 // if renderAudioClient is already initialised, don't initialise it again
4795 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4796 if ( renderAudioClient )
4797 {
4798 methodResult = SUCCESS;
4799 goto Exit;
4800 }
4801
4802 hr = renderDevices->Item( device, &devicePtr );
4803 if ( FAILED( hr ) ) {
4804 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4805 goto Exit;
4806 }
4807
4808 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4809 NULL, ( void** ) &renderAudioClient );
4810 if ( FAILED( hr ) ) {
4811 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4812 goto Exit;
4813 }
4814
4815 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4818 goto Exit;
4819 }
4820
4821 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4822 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4823 }
4824
4825 // fill stream data
4826 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4827 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4828 stream_.mode = DUPLEX;
4829 }
4830 else {
4831 stream_.mode = mode;
4832 }
4833
4834 stream_.device[mode] = device;
4835 stream_.doByteSwap[mode] = false;
4836 stream_.sampleRate = sampleRate;
4837 stream_.bufferSize = *bufferSize;
4838 stream_.nBuffers = 1;
4839 stream_.nUserChannels[mode] = channels;
4840 stream_.channelOffset[mode] = firstChannel;
4841 stream_.userFormat = format;
4842 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4843
4844 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4845 stream_.userInterleaved = false;
4846 else
4847 stream_.userInterleaved = true;
4848 stream_.deviceInterleaved[mode] = true;
4849
4850 // Set flags for buffer conversion.
4851 stream_.doConvertBuffer[mode] = false;
4852 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4853 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4854 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4855 stream_.doConvertBuffer[mode] = true;
4856 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4857 stream_.nUserChannels[mode] > 1 )
4858 stream_.doConvertBuffer[mode] = true;
4859
4860 if ( stream_.doConvertBuffer[mode] )
4861 setConvertInfo( mode, 0 );
4862
4863 // Allocate necessary internal buffers
4864 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4865
4866 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4867 if ( !stream_.userBuffer[mode] ) {
4868 errorType = RtAudioError::MEMORY_ERROR;
4869 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4870 goto Exit;
4871 }
4872
4873 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4874 stream_.callbackInfo.priority = 15;
4875 else
4876 stream_.callbackInfo.priority = 0;
4877
4878 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4879 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4880
4881 methodResult = SUCCESS;
4882
4883 Exit:
4884 //clean up
4885 SAFE_RELEASE( captureDevices );
4886 SAFE_RELEASE( renderDevices );
4887 SAFE_RELEASE( devicePtr );
4888 CoTaskMemFree( deviceFormat );
4889
4890 // if method failed, close the stream
4891 if ( methodResult == FAILURE )
4892 closeStream();
4893
4894 if ( !errorText_.empty() )
4895 error( errorType );
4896 return methodResult;
4897 }
4898
4899 //=============================================================================
4900
runWasapiThread(void * wasapiPtr)4901 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4902 {
4903 if ( wasapiPtr )
4904 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4905
4906 return 0;
4907 }
4908
stopWasapiThread(void * wasapiPtr)4909 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4910 {
4911 if ( wasapiPtr )
4912 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4913
4914 return 0;
4915 }
4916
abortWasapiThread(void * wasapiPtr)4917 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4918 {
4919 if ( wasapiPtr )
4920 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4921
4922 return 0;
4923 }
4924
4925 //-----------------------------------------------------------------------------
4926
wasapiThread()4927 void RtApiWasapi::wasapiThread()
4928 {
4929 // as this is a new thread, we must CoInitialize it
4930 CoInitialize( NULL );
4931
4932 HRESULT hr;
4933
4934 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4935 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4936 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4937 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4938 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4939 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4940
4941 WAVEFORMATEX* captureFormat = NULL;
4942 WAVEFORMATEX* renderFormat = NULL;
4943 float captureSrRatio = 0.0f;
4944 float renderSrRatio = 0.0f;
4945 WasapiBuffer captureBuffer;
4946 WasapiBuffer renderBuffer;
4947 WasapiResampler* captureResampler = NULL;
4948 WasapiResampler* renderResampler = NULL;
4949
4950 // declare local stream variables
4951 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4952 BYTE* streamBuffer = NULL;
4953 unsigned long captureFlags = 0;
4954 unsigned int bufferFrameCount = 0;
4955 unsigned int numFramesPadding = 0;
4956 unsigned int convBufferSize = 0;
4957 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4958 bool callbackPushed = true;
4959 bool callbackPulled = false;
4960 bool callbackStopped = false;
4961 int callbackResult = 0;
4962
4963 // convBuffer is used to store converted buffers between WASAPI and the user
4964 char* convBuffer = NULL;
4965 unsigned int convBuffSize = 0;
4966 unsigned int deviceBuffSize = 0;
4967
4968 std::string errorText;
4969 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4970
4971 // Attempt to assign "Pro Audio" characteristic to thread
4972 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4973 if ( AvrtDll ) {
4974 DWORD taskIndex = 0;
4975 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4976 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4977 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4978 FreeLibrary( AvrtDll );
4979 }
4980
4981 // start capture stream if applicable
4982 if ( captureAudioClient ) {
4983 hr = captureAudioClient->GetMixFormat( &captureFormat );
4984 if ( FAILED( hr ) ) {
4985 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4986 goto Exit;
4987 }
4988
4989 // init captureResampler
4990 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4991 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4992 captureFormat->nSamplesPerSec, stream_.sampleRate );
4993
4994 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4995
4996 if ( !captureClient ) {
4997 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4998 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4999 0,
5000 0,
5001 captureFormat,
5002 NULL );
5003 if ( FAILED( hr ) ) {
5004 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5005 goto Exit;
5006 }
5007
5008 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5009 ( void** ) &captureClient );
5010 if ( FAILED( hr ) ) {
5011 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5012 goto Exit;
5013 }
5014
5015 // don't configure captureEvent if in loopback mode
5016 if ( !loopbackEnabled )
5017 {
5018 // configure captureEvent to trigger on every available capture buffer
5019 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5020 if ( !captureEvent ) {
5021 errorType = RtAudioError::SYSTEM_ERROR;
5022 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5023 goto Exit;
5024 }
5025
5026 hr = captureAudioClient->SetEventHandle( captureEvent );
5027 if ( FAILED( hr ) ) {
5028 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5029 goto Exit;
5030 }
5031
5032 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5033 }
5034
5035 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5036
5037 // reset the capture stream
5038 hr = captureAudioClient->Reset();
5039 if ( FAILED( hr ) ) {
5040 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5041 goto Exit;
5042 }
5043
5044 // start the capture stream
5045 hr = captureAudioClient->Start();
5046 if ( FAILED( hr ) ) {
5047 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5048 goto Exit;
5049 }
5050 }
5051
5052 unsigned int inBufferSize = 0;
5053 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5054 if ( FAILED( hr ) ) {
5055 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5056 goto Exit;
5057 }
5058
5059 // scale outBufferSize according to stream->user sample rate ratio
5060 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5061 inBufferSize *= stream_.nDeviceChannels[INPUT];
5062
5063 // set captureBuffer size
5064 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5065 }
5066
5067 // start render stream if applicable
5068 if ( renderAudioClient ) {
5069 hr = renderAudioClient->GetMixFormat( &renderFormat );
5070 if ( FAILED( hr ) ) {
5071 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5072 goto Exit;
5073 }
5074
5075 // init renderResampler
5076 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5077 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5078 stream_.sampleRate, renderFormat->nSamplesPerSec );
5079
5080 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5081
5082 if ( !renderClient ) {
5083 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5084 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5085 0,
5086 0,
5087 renderFormat,
5088 NULL );
5089 if ( FAILED( hr ) ) {
5090 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5091 goto Exit;
5092 }
5093
5094 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5095 ( void** ) &renderClient );
5096 if ( FAILED( hr ) ) {
5097 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5098 goto Exit;
5099 }
5100
5101 // configure renderEvent to trigger on every available render buffer
5102 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5103 if ( !renderEvent ) {
5104 errorType = RtAudioError::SYSTEM_ERROR;
5105 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5106 goto Exit;
5107 }
5108
5109 hr = renderAudioClient->SetEventHandle( renderEvent );
5110 if ( FAILED( hr ) ) {
5111 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5112 goto Exit;
5113 }
5114
5115 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5116 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5117
5118 // reset the render stream
5119 hr = renderAudioClient->Reset();
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5122 goto Exit;
5123 }
5124
5125 // start the render stream
5126 hr = renderAudioClient->Start();
5127 if ( FAILED( hr ) ) {
5128 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5129 goto Exit;
5130 }
5131 }
5132
5133 unsigned int outBufferSize = 0;
5134 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5135 if ( FAILED( hr ) ) {
5136 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5137 goto Exit;
5138 }
5139
5140 // scale inBufferSize according to user->stream sample rate ratio
5141 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5142 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5143
5144 // set renderBuffer size
5145 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5146 }
5147
5148 // malloc buffer memory
5149 if ( stream_.mode == INPUT )
5150 {
5151 using namespace std; // for ceilf
5152 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5153 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5154 }
5155 else if ( stream_.mode == OUTPUT )
5156 {
5157 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5158 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5159 }
5160 else if ( stream_.mode == DUPLEX )
5161 {
5162 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5163 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5164 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5165 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5166 }
5167
5168 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5169 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5170 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5171 if ( !convBuffer || !stream_.deviceBuffer ) {
5172 errorType = RtAudioError::MEMORY_ERROR;
5173 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5174 goto Exit;
5175 }
5176
5177 // stream process loop
5178 while ( stream_.state != STREAM_STOPPING ) {
5179 if ( !callbackPulled ) {
5180 // Callback Input
5181 // ==============
5182 // 1. Pull callback buffer from inputBuffer
5183 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5184 // Convert callback buffer to user format
5185
5186 if ( captureAudioClient )
5187 {
5188 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5189 if ( captureSrRatio != 1 )
5190 {
5191 // account for remainders
5192 samplesToPull--;
5193 }
5194
5195 convBufferSize = 0;
5196 while ( convBufferSize < stream_.bufferSize )
5197 {
5198 // Pull callback buffer from inputBuffer
5199 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5200 samplesToPull * stream_.nDeviceChannels[INPUT],
5201 stream_.deviceFormat[INPUT] );
5202
5203 if ( !callbackPulled )
5204 {
5205 break;
5206 }
5207
5208 // Convert callback buffer to user sample rate
5209 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5210 unsigned int convSamples = 0;
5211
5212 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5213 convBuffer,
5214 samplesToPull,
5215 convSamples );
5216
5217 convBufferSize += convSamples;
5218 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5219 }
5220
5221 if ( callbackPulled )
5222 {
5223 if ( stream_.doConvertBuffer[INPUT] ) {
5224 // Convert callback buffer to user format
5225 convertBuffer( stream_.userBuffer[INPUT],
5226 stream_.deviceBuffer,
5227 stream_.convertInfo[INPUT] );
5228 }
5229 else {
5230 // no further conversion, simple copy deviceBuffer to userBuffer
5231 memcpy( stream_.userBuffer[INPUT],
5232 stream_.deviceBuffer,
5233 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5234 }
5235 }
5236 }
5237 else {
5238 // if there is no capture stream, set callbackPulled flag
5239 callbackPulled = true;
5240 }
5241
5242 // Execute Callback
5243 // ================
5244 // 1. Execute user callback method
5245 // 2. Handle return value from callback
5246
5247 // if callback has not requested the stream to stop
5248 if ( callbackPulled && !callbackStopped ) {
5249 // Execute user callback method
5250 callbackResult = callback( stream_.userBuffer[OUTPUT],
5251 stream_.userBuffer[INPUT],
5252 stream_.bufferSize,
5253 getStreamTime(),
5254 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5255 stream_.callbackInfo.userData );
5256
5257 // tick stream time
5258 RtApi::tickStreamTime();
5259
5260 // Handle return value from callback
5261 if ( callbackResult == 1 ) {
5262 // instantiate a thread to stop this thread
5263 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5264 if ( !threadHandle ) {
5265 errorType = RtAudioError::THREAD_ERROR;
5266 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5267 goto Exit;
5268 }
5269 else if ( !CloseHandle( threadHandle ) ) {
5270 errorType = RtAudioError::THREAD_ERROR;
5271 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5272 goto Exit;
5273 }
5274
5275 callbackStopped = true;
5276 }
5277 else if ( callbackResult == 2 ) {
5278 // instantiate a thread to stop this thread
5279 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5280 if ( !threadHandle ) {
5281 errorType = RtAudioError::THREAD_ERROR;
5282 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5283 goto Exit;
5284 }
5285 else if ( !CloseHandle( threadHandle ) ) {
5286 errorType = RtAudioError::THREAD_ERROR;
5287 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5288 goto Exit;
5289 }
5290
5291 callbackStopped = true;
5292 }
5293 }
5294 }
5295
5296 // Callback Output
5297 // ===============
5298 // 1. Convert callback buffer to stream format
5299 // 2. Convert callback buffer to stream sample rate and channel count
5300 // 3. Push callback buffer into outputBuffer
5301
5302 if ( renderAudioClient && callbackPulled )
5303 {
5304 // if the last call to renderBuffer.PushBuffer() was successful
5305 if ( callbackPushed || convBufferSize == 0 )
5306 {
5307 if ( stream_.doConvertBuffer[OUTPUT] )
5308 {
5309 // Convert callback buffer to stream format
5310 convertBuffer( stream_.deviceBuffer,
5311 stream_.userBuffer[OUTPUT],
5312 stream_.convertInfo[OUTPUT] );
5313
5314 }
5315 else {
5316 // no further conversion, simple copy userBuffer to deviceBuffer
5317 memcpy( stream_.deviceBuffer,
5318 stream_.userBuffer[OUTPUT],
5319 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5320 }
5321
5322 // Convert callback buffer to stream sample rate
5323 renderResampler->Convert( convBuffer,
5324 stream_.deviceBuffer,
5325 stream_.bufferSize,
5326 convBufferSize );
5327 }
5328
5329 // Push callback buffer into outputBuffer
5330 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5331 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5332 stream_.deviceFormat[OUTPUT] );
5333 }
5334 else {
5335 // if there is no render stream, set callbackPushed flag
5336 callbackPushed = true;
5337 }
5338
5339 // Stream Capture
5340 // ==============
5341 // 1. Get capture buffer from stream
5342 // 2. Push capture buffer into inputBuffer
5343 // 3. If 2. was successful: Release capture buffer
5344
5345 if ( captureAudioClient ) {
5346 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5347 if ( !callbackPulled ) {
5348 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5349 }
5350
5351 // Get capture buffer from stream
5352 hr = captureClient->GetBuffer( &streamBuffer,
5353 &bufferFrameCount,
5354 &captureFlags, NULL, NULL );
5355 if ( FAILED( hr ) ) {
5356 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5357 goto Exit;
5358 }
5359
5360 if ( bufferFrameCount != 0 ) {
5361 // Push capture buffer into inputBuffer
5362 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5363 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5364 stream_.deviceFormat[INPUT] ) )
5365 {
5366 // Release capture buffer
5367 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5368 if ( FAILED( hr ) ) {
5369 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5370 goto Exit;
5371 }
5372 }
5373 else
5374 {
5375 // Inform WASAPI that capture was unsuccessful
5376 hr = captureClient->ReleaseBuffer( 0 );
5377 if ( FAILED( hr ) ) {
5378 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5379 goto Exit;
5380 }
5381 }
5382 }
5383 else
5384 {
5385 // Inform WASAPI that capture was unsuccessful
5386 hr = captureClient->ReleaseBuffer( 0 );
5387 if ( FAILED( hr ) ) {
5388 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5389 goto Exit;
5390 }
5391 }
5392 }
5393
5394 // Stream Render
5395 // =============
5396 // 1. Get render buffer from stream
5397 // 2. Pull next buffer from outputBuffer
5398 // 3. If 2. was successful: Fill render buffer with next buffer
5399 // Release render buffer
5400
5401 if ( renderAudioClient ) {
5402 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5403 if ( callbackPulled && !callbackPushed ) {
5404 WaitForSingleObject( renderEvent, INFINITE );
5405 }
5406
5407 // Get render buffer from stream
5408 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5409 if ( FAILED( hr ) ) {
5410 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5411 goto Exit;
5412 }
5413
5414 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5415 if ( FAILED( hr ) ) {
5416 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5417 goto Exit;
5418 }
5419
5420 bufferFrameCount -= numFramesPadding;
5421
5422 if ( bufferFrameCount != 0 ) {
5423 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5426 goto Exit;
5427 }
5428
5429 // Pull next buffer from outputBuffer
5430 // Fill render buffer with next buffer
5431 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5432 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5433 stream_.deviceFormat[OUTPUT] ) )
5434 {
5435 // Release render buffer
5436 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5437 if ( FAILED( hr ) ) {
5438 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5439 goto Exit;
5440 }
5441 }
5442 else
5443 {
5444 // Inform WASAPI that render was unsuccessful
5445 hr = renderClient->ReleaseBuffer( 0, 0 );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5448 goto Exit;
5449 }
5450 }
5451 }
5452 else
5453 {
5454 // Inform WASAPI that render was unsuccessful
5455 hr = renderClient->ReleaseBuffer( 0, 0 );
5456 if ( FAILED( hr ) ) {
5457 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5458 goto Exit;
5459 }
5460 }
5461 }
5462
5463 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5464 if ( callbackPushed ) {
5465 // unsetting the callbackPulled flag lets the stream know that
5466 // the audio device is ready for another callback output buffer.
5467 callbackPulled = false;
5468 }
5469
5470 }
5471
5472 Exit:
5473 // clean up
5474 CoTaskMemFree( captureFormat );
5475 CoTaskMemFree( renderFormat );
5476
5477 free ( convBuffer );
5478 delete renderResampler;
5479 delete captureResampler;
5480
5481 CoUninitialize();
5482
5483 // update stream state
5484 stream_.state = STREAM_STOPPED;
5485
5486 if ( !errorText.empty() )
5487 {
5488 errorText_ = errorText;
5489 error( errorType );
5490 }
5491 }
5492
5493 //******************** End of __WINDOWS_WASAPI__ *********************//
5494 #endif
5495
5496
5497 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5498
5499 // Modified by Robin Davies, October 2005
5500 // - Improvements to DirectX pointer chasing.
5501 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5502 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5503 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5504 // Changed device query structure for RtAudio 4.0.7, January 2010
5505
5506 #include <windows.h>
5507 #include <process.h>
5508 #include <mmsystem.h>
5509 #include <mmreg.h>
5510 #include <dsound.h>
5511 #include <assert.h>
5512 #include <algorithm>
5513
5514 #if defined(__MINGW32__)
5515 // missing from latest mingw winapi
5516 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5517 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5518 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5519 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5520 #endif
5521
5522 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5523
5524 #ifdef _MSC_VER // if Microsoft Visual C++
5525 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5526 #endif
5527
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5528 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5529 {
5530 if ( pointer > bufferSize ) pointer -= bufferSize;
5531 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5532 if ( pointer < earlierPointer ) pointer += bufferSize;
5533 return pointer >= earlierPointer && pointer < laterPointer;
5534 }
5535
5536 // A structure to hold various information related to the DirectSound
5537 // API implementation.
5538 struct DsHandle {
5539 unsigned int drainCounter; // Tracks callback counts when draining
5540 bool internalDrain; // Indicates if stop is initiated from callback or not.
5541 void *id[2];
5542 void *buffer[2];
5543 bool xrun[2];
5544 UINT bufferPointer[2];
5545 DWORD dsBufferSize[2];
5546 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5547 HANDLE condition;
5548
DsHandleDsHandle5549 DsHandle()
5550 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5551 };
5552
5553 // Declarations for utility functions, callbacks, and structures
5554 // specific to the DirectSound implementation.
5555 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5556 LPCTSTR description,
5557 LPCTSTR module,
5558 LPVOID lpContext );
5559
5560 static const char* getErrorString( int code );
5561
5562 static unsigned __stdcall callbackHandler( void *ptr );
5563
5564 struct DsDevice {
5565 LPGUID id[2];
5566 bool validId[2];
5567 bool found;
5568 std::string name;
5569
DsDeviceDsDevice5570 DsDevice()
5571 : found(false) { validId[0] = false; validId[1] = false; }
5572 };
5573
// Context handed to the DirectSound device-enumeration callback
// ( deviceQueryCallback ) through its LPVOID user argument.
struct DsProbeData {
  bool isInput;                             // true when enumerating capture devices, false for playback
  std::vector<struct DsDevice>* dsDevices;  // device list to populate/update during enumeration
};
5578
RtApiDs()5579 RtApiDs :: RtApiDs()
5580 {
5581 // Dsound will run both-threaded. If CoInitialize fails, then just
5582 // accept whatever the mainline chose for a threading model.
5583 coInitialized_ = false;
5584 HRESULT hr = CoInitialize( NULL );
5585 if ( !FAILED( hr ) ) coInitialized_ = true;
5586 }
5587
~RtApiDs()5588 RtApiDs :: ~RtApiDs()
5589 {
5590 if ( stream_.state != STREAM_CLOSED ) closeStream();
5591 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5592 }
5593
5594 // The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  // DirectSound enumerates the default playback device first, so index 0
  // always refers to it.
  return 0;
}
5599
5600 // The DirectSound default input is always the first input device,
5601 // which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  // The default capture device is the first one enumerated by
  // DirectSoundCapture, so index 0 always refers to it.
  return 0;
}
5606
getDeviceCount(void)5607 unsigned int RtApiDs :: getDeviceCount( void )
5608 {
5609 // Set query flag for previously found devices to false, so that we
5610 // can check for any devices that have disappeared.
5611 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5612 dsDevices[i].found = false;
5613
5614 // Query DirectSound devices.
5615 struct DsProbeData probeInfo;
5616 probeInfo.isInput = false;
5617 probeInfo.dsDevices = &dsDevices;
5618 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5619 if ( FAILED( result ) ) {
5620 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5621 errorText_ = errorStream_.str();
5622 error( RtAudioError::WARNING );
5623 }
5624
5625 // Query DirectSoundCapture devices.
5626 probeInfo.isInput = true;
5627 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5628 if ( FAILED( result ) ) {
5629 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5630 errorText_ = errorStream_.str();
5631 error( RtAudioError::WARNING );
5632 }
5633
5634 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5635 for ( unsigned int i=0; i<dsDevices.size(); ) {
5636 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5637 else i++;
5638 }
5639
5640 return static_cast<unsigned int>(dsDevices.size());
5641 }
5642
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  // Probe the DirectSound device at index 'device' in dsDevices and return
  // an RtAudio::DeviceInfo describing its playback and/or capture
  // capabilities.  info.probed is only set true on a fully successful
  // probe; failures raise a WARNING (or INVALID_USE) error and return the
  // partially filled structure.
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No playback GUID for this device: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    // Output probe failed; still try the capture side.
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.  Every candidate rate that falls within
  // the device's secondary-buffer range is supported; the preferred rate is
  // the highest supported rate not exceeding 48 kHz.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // No capture GUID: report playback-only info and finish here.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.  The dwFormats bitmask encodes
  // rate (11/22/44/96 kHz), channel layout (mono/stereo) and sample width
  // (8/16 bit) in a single flag per combination.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    // Stereo flags determine the supported native formats ...
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // ... and, for the best available format, the supported rates.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    // Same logic as above, using the mono format flags.
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5821
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5822 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5823 unsigned int firstChannel, unsigned int sampleRate,
5824 RtAudioFormat format, unsigned int *bufferSize,
5825 RtAudio::StreamOptions *options )
5826 {
5827 if ( channels + firstChannel > 2 ) {
5828 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5829 return FAILURE;
5830 }
5831
5832 size_t nDevices = dsDevices.size();
5833 if ( nDevices == 0 ) {
5834 // This should not happen because a check is made before this function is called.
5835 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5836 return FAILURE;
5837 }
5838
5839 if ( device >= nDevices ) {
5840 // This should not happen because a check is made before this function is called.
5841 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5842 return FAILURE;
5843 }
5844
5845 if ( mode == OUTPUT ) {
5846 if ( dsDevices[ device ].validId[0] == false ) {
5847 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5848 errorText_ = errorStream_.str();
5849 return FAILURE;
5850 }
5851 }
5852 else { // mode == INPUT
5853 if ( dsDevices[ device ].validId[1] == false ) {
5854 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5855 errorText_ = errorStream_.str();
5856 return FAILURE;
5857 }
5858 }
5859
5860 // According to a note in PortAudio, using GetDesktopWindow()
5861 // instead of GetForegroundWindow() is supposed to avoid problems
5862 // that occur when the application's window is not the foreground
5863 // window. Also, if the application window closes before the
5864 // DirectSound buffer, DirectSound can crash. In the past, I had
5865 // problems when using GetDesktopWindow() but it seems fine now
5866 // (January 2010). I'll leave it commented here.
5867 // HWND hWnd = GetForegroundWindow();
5868 HWND hWnd = GetDesktopWindow();
5869
5870 // Check the numberOfBuffers parameter and limit the lowest value to
5871 // two. This is a judgement call and a value of two is probably too
5872 // low for capture, but it should work for playback.
5873 int nBuffers = 0;
5874 if ( options ) nBuffers = options->numberOfBuffers;
5875 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5876 if ( nBuffers < 2 ) nBuffers = 3;
5877
5878 // Check the lower range of the user-specified buffer size and set
5879 // (arbitrarily) to a lower bound of 32.
5880 if ( *bufferSize < 32 ) *bufferSize = 32;
5881
5882 // Create the wave format structure. The data format setting will
5883 // be determined later.
5884 WAVEFORMATEX waveFormat;
5885 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5886 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5887 waveFormat.nChannels = channels + firstChannel;
5888 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5889
5890 // Determine the device buffer size. By default, we'll use the value
5891 // defined above (32K), but we will grow it to make allowances for
5892 // very large software buffer sizes.
5893 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5894 DWORD dsPointerLeadTime = 0;
5895
5896 void *ohandle = 0, *bhandle = 0;
5897 HRESULT result;
5898 if ( mode == OUTPUT ) {
5899
5900 LPDIRECTSOUND output;
5901 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5902 if ( FAILED( result ) ) {
5903 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5904 errorText_ = errorStream_.str();
5905 return FAILURE;
5906 }
5907
5908 DSCAPS outCaps;
5909 outCaps.dwSize = sizeof( outCaps );
5910 result = output->GetCaps( &outCaps );
5911 if ( FAILED( result ) ) {
5912 output->Release();
5913 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5914 errorText_ = errorStream_.str();
5915 return FAILURE;
5916 }
5917
5918 // Check channel information.
5919 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5920 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5921 errorText_ = errorStream_.str();
5922 return FAILURE;
5923 }
5924
5925 // Check format information. Use 16-bit format unless not
5926 // supported or user requests 8-bit.
5927 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5928 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5929 waveFormat.wBitsPerSample = 16;
5930 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5931 }
5932 else {
5933 waveFormat.wBitsPerSample = 8;
5934 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5935 }
5936 stream_.userFormat = format;
5937
5938 // Update wave format structure and buffer information.
5939 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5940 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5941 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5942
5943 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5944 while ( dsPointerLeadTime * 2U > dsBufferSize )
5945 dsBufferSize *= 2;
5946
5947 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5948 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5949 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5950 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5951 if ( FAILED( result ) ) {
5952 output->Release();
5953 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5954 errorText_ = errorStream_.str();
5955 return FAILURE;
5956 }
5957
5958 // Even though we will write to the secondary buffer, we need to
5959 // access the primary buffer to set the correct output format
5960 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5961 // buffer description.
5962 DSBUFFERDESC bufferDescription;
5963 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5964 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5965 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5966
5967 // Obtain the primary buffer
5968 LPDIRECTSOUNDBUFFER buffer;
5969 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5970 if ( FAILED( result ) ) {
5971 output->Release();
5972 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5973 errorText_ = errorStream_.str();
5974 return FAILURE;
5975 }
5976
5977 // Set the primary DS buffer sound format.
5978 result = buffer->SetFormat( &waveFormat );
5979 if ( FAILED( result ) ) {
5980 output->Release();
5981 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5982 errorText_ = errorStream_.str();
5983 return FAILURE;
5984 }
5985
5986 // Setup the secondary DS buffer description.
5987 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5988 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5989 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5990 DSBCAPS_GLOBALFOCUS |
5991 DSBCAPS_GETCURRENTPOSITION2 |
5992 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5993 bufferDescription.dwBufferBytes = dsBufferSize;
5994 bufferDescription.lpwfxFormat = &waveFormat;
5995
5996 // Try to create the secondary DS buffer. If that doesn't work,
5997 // try to use software mixing. Otherwise, there's a problem.
5998 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5999 if ( FAILED( result ) ) {
6000 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6001 DSBCAPS_GLOBALFOCUS |
6002 DSBCAPS_GETCURRENTPOSITION2 |
6003 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6004 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6005 if ( FAILED( result ) ) {
6006 output->Release();
6007 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6008 errorText_ = errorStream_.str();
6009 return FAILURE;
6010 }
6011 }
6012
6013 // Get the buffer size ... might be different from what we specified.
6014 DSBCAPS dsbcaps;
6015 dsbcaps.dwSize = sizeof( DSBCAPS );
6016 result = buffer->GetCaps( &dsbcaps );
6017 if ( FAILED( result ) ) {
6018 output->Release();
6019 buffer->Release();
6020 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6021 errorText_ = errorStream_.str();
6022 return FAILURE;
6023 }
6024
6025 dsBufferSize = dsbcaps.dwBufferBytes;
6026
6027 // Lock the DS buffer
6028 LPVOID audioPtr;
6029 DWORD dataLen;
6030 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6031 if ( FAILED( result ) ) {
6032 output->Release();
6033 buffer->Release();
6034 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6035 errorText_ = errorStream_.str();
6036 return FAILURE;
6037 }
6038
6039 // Zero the DS buffer
6040 ZeroMemory( audioPtr, dataLen );
6041
6042 // Unlock the DS buffer
6043 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6044 if ( FAILED( result ) ) {
6045 output->Release();
6046 buffer->Release();
6047 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6048 errorText_ = errorStream_.str();
6049 return FAILURE;
6050 }
6051
6052 ohandle = (void *) output;
6053 bhandle = (void *) buffer;
6054 }
6055
6056 if ( mode == INPUT ) {
6057
6058 LPDIRECTSOUNDCAPTURE input;
6059 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6060 if ( FAILED( result ) ) {
6061 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6062 errorText_ = errorStream_.str();
6063 return FAILURE;
6064 }
6065
6066 DSCCAPS inCaps;
6067 inCaps.dwSize = sizeof( inCaps );
6068 result = input->GetCaps( &inCaps );
6069 if ( FAILED( result ) ) {
6070 input->Release();
6071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6072 errorText_ = errorStream_.str();
6073 return FAILURE;
6074 }
6075
6076 // Check channel information.
6077 if ( inCaps.dwChannels < channels + firstChannel ) {
6078 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6079 return FAILURE;
6080 }
6081
6082 // Check format information. Use 16-bit format unless user
6083 // requests 8-bit.
6084 DWORD deviceFormats;
6085 if ( channels + firstChannel == 2 ) {
6086 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6087 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6088 waveFormat.wBitsPerSample = 8;
6089 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6090 }
6091 else { // assume 16-bit is supported
6092 waveFormat.wBitsPerSample = 16;
6093 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6094 }
6095 }
6096 else { // channel == 1
6097 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6098 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6099 waveFormat.wBitsPerSample = 8;
6100 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6101 }
6102 else { // assume 16-bit is supported
6103 waveFormat.wBitsPerSample = 16;
6104 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6105 }
6106 }
6107 stream_.userFormat = format;
6108
6109 // Update wave format structure and buffer information.
6110 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6111 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6112 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6113
6114 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6115 while ( dsPointerLeadTime * 2U > dsBufferSize )
6116 dsBufferSize *= 2;
6117
6118 // Setup the secondary DS buffer description.
6119 DSCBUFFERDESC bufferDescription;
6120 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6121 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6122 bufferDescription.dwFlags = 0;
6123 bufferDescription.dwReserved = 0;
6124 bufferDescription.dwBufferBytes = dsBufferSize;
6125 bufferDescription.lpwfxFormat = &waveFormat;
6126
6127 // Create the capture buffer.
6128 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6129 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6130 if ( FAILED( result ) ) {
6131 input->Release();
6132 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6133 errorText_ = errorStream_.str();
6134 return FAILURE;
6135 }
6136
6137 // Get the buffer size ... might be different from what we specified.
6138 DSCBCAPS dscbcaps;
6139 dscbcaps.dwSize = sizeof( DSCBCAPS );
6140 result = buffer->GetCaps( &dscbcaps );
6141 if ( FAILED( result ) ) {
6142 input->Release();
6143 buffer->Release();
6144 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6145 errorText_ = errorStream_.str();
6146 return FAILURE;
6147 }
6148
6149 dsBufferSize = dscbcaps.dwBufferBytes;
6150
6151 // NOTE: We could have a problem here if this is a duplex stream
6152 // and the play and capture hardware buffer sizes are different
6153 // (I'm actually not sure if that is a problem or not).
6154 // Currently, we are not verifying that.
6155
6156 // Lock the capture buffer
6157 LPVOID audioPtr;
6158 DWORD dataLen;
6159 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6160 if ( FAILED( result ) ) {
6161 input->Release();
6162 buffer->Release();
6163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6164 errorText_ = errorStream_.str();
6165 return FAILURE;
6166 }
6167
6168 // Zero the buffer
6169 ZeroMemory( audioPtr, dataLen );
6170
6171 // Unlock the buffer
6172 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6173 if ( FAILED( result ) ) {
6174 input->Release();
6175 buffer->Release();
6176 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6177 errorText_ = errorStream_.str();
6178 return FAILURE;
6179 }
6180
6181 ohandle = (void *) input;
6182 bhandle = (void *) buffer;
6183 }
6184
6185 // Set various stream parameters
6186 DsHandle *handle = 0;
6187 stream_.nDeviceChannels[mode] = channels + firstChannel;
6188 stream_.nUserChannels[mode] = channels;
6189 stream_.bufferSize = *bufferSize;
6190 stream_.channelOffset[mode] = firstChannel;
6191 stream_.deviceInterleaved[mode] = true;
6192 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6193 else stream_.userInterleaved = true;
6194
6195 // Set flag for buffer conversion
6196 stream_.doConvertBuffer[mode] = false;
6197 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6198 stream_.doConvertBuffer[mode] = true;
6199 if (stream_.userFormat != stream_.deviceFormat[mode])
6200 stream_.doConvertBuffer[mode] = true;
6201 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6202 stream_.nUserChannels[mode] > 1 )
6203 stream_.doConvertBuffer[mode] = true;
6204
6205 // Allocate necessary internal buffers
6206 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6207 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6208 if ( stream_.userBuffer[mode] == NULL ) {
6209 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6210 goto error;
6211 }
6212
6213 if ( stream_.doConvertBuffer[mode] ) {
6214
6215 bool makeBuffer = true;
6216 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6217 if ( mode == INPUT ) {
6218 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6219 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6220 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6221 }
6222 }
6223
6224 if ( makeBuffer ) {
6225 bufferBytes *= *bufferSize;
6226 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6227 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6228 if ( stream_.deviceBuffer == NULL ) {
6229 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6230 goto error;
6231 }
6232 }
6233 }
6234
6235 // Allocate our DsHandle structures for the stream.
6236 if ( stream_.apiHandle == 0 ) {
6237 try {
6238 handle = new DsHandle;
6239 }
6240 catch ( std::bad_alloc& ) {
6241 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6242 goto error;
6243 }
6244
6245 // Create a manual-reset event.
6246 handle->condition = CreateEvent( NULL, // no security
6247 TRUE, // manual-reset
6248 FALSE, // non-signaled initially
6249 NULL ); // unnamed
6250 stream_.apiHandle = (void *) handle;
6251 }
6252 else
6253 handle = (DsHandle *) stream_.apiHandle;
6254 handle->id[mode] = ohandle;
6255 handle->buffer[mode] = bhandle;
6256 handle->dsBufferSize[mode] = dsBufferSize;
6257 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6258
6259 stream_.device[mode] = device;
6260 stream_.state = STREAM_STOPPED;
6261 if ( stream_.mode == OUTPUT && mode == INPUT )
6262 // We had already set up an output stream.
6263 stream_.mode = DUPLEX;
6264 else
6265 stream_.mode = mode;
6266 stream_.nBuffers = nBuffers;
6267 stream_.sampleRate = sampleRate;
6268
6269 // Setup the buffer conversion information structure.
6270 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6271
6272 // Setup the callback thread.
6273 if ( stream_.callbackInfo.isRunning == false ) {
6274 unsigned threadId;
6275 stream_.callbackInfo.isRunning = true;
6276 stream_.callbackInfo.object = (void *) this;
6277 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6278 &stream_.callbackInfo, 0, &threadId );
6279 if ( stream_.callbackInfo.thread == 0 ) {
6280 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6281 goto error;
6282 }
6283
6284 // Boost DS thread priority
6285 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6286 }
6287 return SUCCESS;
6288
6289 error:
6290 if ( handle ) {
6291 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6292 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6293 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6294 if ( buffer ) buffer->Release();
6295 object->Release();
6296 }
6297 if ( handle->buffer[1] ) {
6298 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6299 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6300 if ( buffer ) buffer->Release();
6301 object->Release();
6302 }
6303 CloseHandle( handle->condition );
6304 delete handle;
6305 stream_.apiHandle = 0;
6306 }
6307
6308 for ( int i=0; i<2; i++ ) {
6309 if ( stream_.userBuffer[i] ) {
6310 free( stream_.userBuffer[i] );
6311 stream_.userBuffer[i] = 0;
6312 }
6313 }
6314
6315 if ( stream_.deviceBuffer ) {
6316 free( stream_.deviceBuffer );
6317 stream_.deviceBuffer = 0;
6318 }
6319
6320 stream_.state = STREAM_CLOSED;
6321 return FAILURE;
6322 }
6323
closeStream()6324 void RtApiDs :: closeStream()
6325 {
6326 if ( stream_.state == STREAM_CLOSED ) {
6327 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6328 error( RtAudioError::WARNING );
6329 return;
6330 }
6331
6332 // Stop the callback thread.
6333 stream_.callbackInfo.isRunning = false;
6334 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6335 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6336
6337 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6338 if ( handle ) {
6339 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6340 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6341 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6342 if ( buffer ) {
6343 buffer->Stop();
6344 buffer->Release();
6345 }
6346 object->Release();
6347 }
6348 if ( handle->buffer[1] ) {
6349 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6350 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6351 if ( buffer ) {
6352 buffer->Stop();
6353 buffer->Release();
6354 }
6355 object->Release();
6356 }
6357 CloseHandle( handle->condition );
6358 delete handle;
6359 stream_.apiHandle = 0;
6360 }
6361
6362 for ( int i=0; i<2; i++ ) {
6363 if ( stream_.userBuffer[i] ) {
6364 free( stream_.userBuffer[i] );
6365 stream_.userBuffer[i] = 0;
6366 }
6367 }
6368
6369 if ( stream_.deviceBuffer ) {
6370 free( stream_.deviceBuffer );
6371 stream_.deviceBuffer = 0;
6372 }
6373
6374 stream_.mode = UNINITIALIZED;
6375 stream_.state = STREAM_CLOSED;
6376 }
6377
//! Begin DirectSound playback and/or capture on a previously opened stream.
/*!
  Starts the secondary playback buffer and/or the capture buffer in
  looping mode (the callback thread keeps them fed/drained), resets the
  drain bookkeeping, and marks the stream running.  On a DirectSound
  error the stream is left unstarted and a SYSTEM_ERROR is raised.
*/
void RtApiDs :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiDs::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  DsHandle *handle = (DsHandle *) stream_.apiHandle;

  // Increase scheduler frequency on lesser windows (a side-effect of
  // increasing timer accuracy). On greater windows (Win2K or later),
  // this is already in effect.
  timeBeginPeriod( 1 );

  buffersRolling = false;
  duplexPrerollBytes = 0;

  if ( stream_.mode == DUPLEX ) {
    // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
    duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
  }

  HRESULT result = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Start the playback buffer looping; the callback thread refills it.
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Start the capture buffer looping as well.
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    result = buffer->Start( DSCBSTART_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Reset drain bookkeeping and the stop-signal event before going live.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

 unlock:
  // Common exit: raise a system error only if one of the Play/Start calls failed.
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6437
//! Stop the stream, first allowing queued output to drain.
/*!
  For output/duplex streams this waits (unless a drain was already
  requested, e.g. via abortStream()) for the callback thread to play out
  pending buffers, then stops and zeros the DirectSound buffers so a
  subsequent start does not replay stale data.  NOTE(review): the mutex
  lock/unlock pairing is order-sensitive — in DUPLEX mode the lock taken
  in the output branch is deliberately not re-taken in the input branch,
  and the `unlock` label always releases it.
*/
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // If no drain was requested yet, ask the callback thread to finish
    // playing the queued buffers and wait for its completion signal.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already acquired in the output
    // branch above; taking it again here would deadlock.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6541
abortStream()6542 void RtApiDs :: abortStream()
6543 {
6544 verifyStream();
6545 if ( stream_.state == STREAM_STOPPED ) {
6546 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6547 error( RtAudioError::WARNING );
6548 return;
6549 }
6550
6551 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6552 handle->drainCounter = 2;
6553
6554 stopStream();
6555 }
6556
callbackEvent()6557 void RtApiDs :: callbackEvent()
6558 {
6559 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6560 Sleep( 50 ); // sleep 50 milliseconds
6561 return;
6562 }
6563
6564 if ( stream_.state == STREAM_CLOSED ) {
6565 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6566 error( RtAudioError::WARNING );
6567 return;
6568 }
6569
6570 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6571 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6572
6573 // Check if we were draining the stream and signal is finished.
6574 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6575
6576 stream_.state = STREAM_STOPPING;
6577 if ( handle->internalDrain == false )
6578 SetEvent( handle->condition );
6579 else
6580 stopStream();
6581 return;
6582 }
6583
6584 // Invoke user callback to get fresh output data UNLESS we are
6585 // draining stream.
6586 if ( handle->drainCounter == 0 ) {
6587 RtAudioCallback callback = (RtAudioCallback) info->callback;
6588 double streamTime = getStreamTime();
6589 RtAudioStreamStatus status = 0;
6590 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6591 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6592 handle->xrun[0] = false;
6593 }
6594 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6595 status |= RTAUDIO_INPUT_OVERFLOW;
6596 handle->xrun[1] = false;
6597 }
6598 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6599 stream_.bufferSize, streamTime, status, info->userData );
6600 if ( cbReturnValue == 2 ) {
6601 stream_.state = STREAM_STOPPING;
6602 handle->drainCounter = 2;
6603 abortStream();
6604 return;
6605 }
6606 else if ( cbReturnValue == 1 ) {
6607 handle->drainCounter = 1;
6608 handle->internalDrain = true;
6609 }
6610 }
6611
6612 HRESULT result;
6613 DWORD currentWritePointer, safeWritePointer;
6614 DWORD currentReadPointer, safeReadPointer;
6615 UINT nextWritePointer;
6616
6617 LPVOID buffer1 = NULL;
6618 LPVOID buffer2 = NULL;
6619 DWORD bufferSize1 = 0;
6620 DWORD bufferSize2 = 0;
6621
6622 char *buffer;
6623 long bufferBytes;
6624
6625 MUTEX_LOCK( &stream_.mutex );
6626 if ( stream_.state == STREAM_STOPPED ) {
6627 MUTEX_UNLOCK( &stream_.mutex );
6628 return;
6629 }
6630
6631 if ( buffersRolling == false ) {
6632 if ( stream_.mode == DUPLEX ) {
6633 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6634
6635 // It takes a while for the devices to get rolling. As a result,
6636 // there's no guarantee that the capture and write device pointers
6637 // will move in lockstep. Wait here for both devices to start
6638 // rolling, and then set our buffer pointers accordingly.
6639 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6640 // bytes later than the write buffer.
6641
6642 // Stub: a serious risk of having a pre-emptive scheduling round
6643 // take place between the two GetCurrentPosition calls... but I'm
6644 // really not sure how to solve the problem. Temporarily boost to
6645 // Realtime priority, maybe; but I'm not sure what priority the
6646 // DirectSound service threads run at. We *should* be roughly
6647 // within a ms or so of correct.
6648
6649 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6650 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6651
6652 DWORD startSafeWritePointer, startSafeReadPointer;
6653
6654 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6655 if ( FAILED( result ) ) {
6656 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6657 errorText_ = errorStream_.str();
6658 MUTEX_UNLOCK( &stream_.mutex );
6659 error( RtAudioError::SYSTEM_ERROR );
6660 return;
6661 }
6662 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6663 if ( FAILED( result ) ) {
6664 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6665 errorText_ = errorStream_.str();
6666 MUTEX_UNLOCK( &stream_.mutex );
6667 error( RtAudioError::SYSTEM_ERROR );
6668 return;
6669 }
6670 while ( true ) {
6671 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6672 if ( FAILED( result ) ) {
6673 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6674 errorText_ = errorStream_.str();
6675 MUTEX_UNLOCK( &stream_.mutex );
6676 error( RtAudioError::SYSTEM_ERROR );
6677 return;
6678 }
6679 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6680 if ( FAILED( result ) ) {
6681 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6682 errorText_ = errorStream_.str();
6683 MUTEX_UNLOCK( &stream_.mutex );
6684 error( RtAudioError::SYSTEM_ERROR );
6685 return;
6686 }
6687 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6688 Sleep( 1 );
6689 }
6690
6691 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6692
6693 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6694 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6695 handle->bufferPointer[1] = safeReadPointer;
6696 }
6697 else if ( stream_.mode == OUTPUT ) {
6698
6699 // Set the proper nextWritePosition after initial startup.
6700 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6701 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6702 if ( FAILED( result ) ) {
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6704 errorText_ = errorStream_.str();
6705 MUTEX_UNLOCK( &stream_.mutex );
6706 error( RtAudioError::SYSTEM_ERROR );
6707 return;
6708 }
6709 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6710 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6711 }
6712
6713 buffersRolling = true;
6714 }
6715
6716 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6717
6718 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6719
6720 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6721 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6722 bufferBytes *= formatBytes( stream_.userFormat );
6723 memset( stream_.userBuffer[0], 0, bufferBytes );
6724 }
6725
6726 // Setup parameters and do buffer conversion if necessary.
6727 if ( stream_.doConvertBuffer[0] ) {
6728 buffer = stream_.deviceBuffer;
6729 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6730 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6731 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6732 }
6733 else {
6734 buffer = stream_.userBuffer[0];
6735 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6736 bufferBytes *= formatBytes( stream_.userFormat );
6737 }
6738
6739 // No byte swapping necessary in DirectSound implementation.
6740
6741 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6742 // unsigned. So, we need to convert our signed 8-bit data here to
6743 // unsigned.
6744 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6745 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6746
6747 DWORD dsBufferSize = handle->dsBufferSize[0];
6748 nextWritePointer = handle->bufferPointer[0];
6749
6750 DWORD endWrite, leadPointer;
6751 while ( true ) {
6752 // Find out where the read and "safe write" pointers are.
6753 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6754 if ( FAILED( result ) ) {
6755 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6756 errorText_ = errorStream_.str();
6757 MUTEX_UNLOCK( &stream_.mutex );
6758 error( RtAudioError::SYSTEM_ERROR );
6759 return;
6760 }
6761
6762 // We will copy our output buffer into the region between
6763 // safeWritePointer and leadPointer. If leadPointer is not
6764 // beyond the next endWrite position, wait until it is.
6765 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6766 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6767 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6768 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6769 endWrite = nextWritePointer + bufferBytes;
6770
6771 // Check whether the entire write region is behind the play pointer.
6772 if ( leadPointer >= endWrite ) break;
6773
6774 // If we are here, then we must wait until the leadPointer advances
6775 // beyond the end of our next write region. We use the
6776 // Sleep() function to suspend operation until that happens.
6777 double millis = ( endWrite - leadPointer ) * 1000.0;
6778 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6779 if ( millis < 1.0 ) millis = 1.0;
6780 Sleep( (DWORD) millis );
6781 }
6782
6783 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6784 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6785 // We've strayed into the forbidden zone ... resync the read pointer.
6786 handle->xrun[0] = true;
6787 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6788 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6789 handle->bufferPointer[0] = nextWritePointer;
6790 endWrite = nextWritePointer + bufferBytes;
6791 }
6792
6793 // Lock free space in the buffer
6794 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6795 &bufferSize1, &buffer2, &bufferSize2, 0 );
6796 if ( FAILED( result ) ) {
6797 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6798 errorText_ = errorStream_.str();
6799 MUTEX_UNLOCK( &stream_.mutex );
6800 error( RtAudioError::SYSTEM_ERROR );
6801 return;
6802 }
6803
6804 // Copy our buffer into the DS buffer
6805 CopyMemory( buffer1, buffer, bufferSize1 );
6806 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6807
6808 // Update our buffer offset and unlock sound buffer
6809 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6810 if ( FAILED( result ) ) {
6811 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6812 errorText_ = errorStream_.str();
6813 MUTEX_UNLOCK( &stream_.mutex );
6814 error( RtAudioError::SYSTEM_ERROR );
6815 return;
6816 }
6817 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6818 handle->bufferPointer[0] = nextWritePointer;
6819 }
6820
6821 // Don't bother draining input
6822 if ( handle->drainCounter ) {
6823 handle->drainCounter++;
6824 goto unlock;
6825 }
6826
6827 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6828
6829 // Setup parameters.
6830 if ( stream_.doConvertBuffer[1] ) {
6831 buffer = stream_.deviceBuffer;
6832 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6833 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6834 }
6835 else {
6836 buffer = stream_.userBuffer[1];
6837 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6838 bufferBytes *= formatBytes( stream_.userFormat );
6839 }
6840
6841 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6842 long nextReadPointer = handle->bufferPointer[1];
6843 DWORD dsBufferSize = handle->dsBufferSize[1];
6844
6845 // Find out where the write and "safe read" pointers are.
    result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6847 if ( FAILED( result ) ) {
6848 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6849 errorText_ = errorStream_.str();
6850 MUTEX_UNLOCK( &stream_.mutex );
6851 error( RtAudioError::SYSTEM_ERROR );
6852 return;
6853 }
6854
6855 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6856 DWORD endRead = nextReadPointer + bufferBytes;
6857
6858 // Handling depends on whether we are INPUT or DUPLEX.
6859 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6860 // then a wait here will drag the write pointers into the forbidden zone.
6861 //
6862 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6863 // it's in a safe position. This causes dropouts, but it seems to be the only
6864 // practical way to sync up the read and write pointers reliably, given the
6865 // the very complex relationship between phase and increment of the read and write
6866 // pointers.
6867 //
6868 // In order to minimize audible dropouts in DUPLEX mode, we will
6869 // provide a pre-roll period of 0.5 seconds in which we return
6870 // zeros from the read buffer while the pointers sync up.
6871
6872 if ( stream_.mode == DUPLEX ) {
6873 if ( safeReadPointer < endRead ) {
6874 if ( duplexPrerollBytes <= 0 ) {
6875 // Pre-roll time over. Be more agressive.
6876 int adjustment = endRead-safeReadPointer;
6877
6878 handle->xrun[1] = true;
6879 // Two cases:
6880 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6881 // and perform fine adjustments later.
6882 // - small adjustments: back off by twice as much.
6883 if ( adjustment >= 2*bufferBytes )
6884 nextReadPointer = safeReadPointer-2*bufferBytes;
6885 else
6886 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6887
6888 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6889
6890 }
6891 else {
          // In pre-roll time. Just do it.
6893 nextReadPointer = safeReadPointer - bufferBytes;
6894 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6895 }
6896 endRead = nextReadPointer + bufferBytes;
6897 }
6898 }
6899 else { // mode == INPUT
6900 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6901 // See comments for playback.
6902 double millis = (endRead - safeReadPointer) * 1000.0;
6903 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6904 if ( millis < 1.0 ) millis = 1.0;
6905 Sleep( (DWORD) millis );
6906
6907 // Wake up and find out where we are now.
        result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6909 if ( FAILED( result ) ) {
6910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6911 errorText_ = errorStream_.str();
6912 MUTEX_UNLOCK( &stream_.mutex );
6913 error( RtAudioError::SYSTEM_ERROR );
6914 return;
6915 }
6916
6917 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6918 }
6919 }
6920
6921 // Lock free space in the buffer
6922 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6923 &bufferSize1, &buffer2, &bufferSize2, 0 );
6924 if ( FAILED( result ) ) {
6925 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6926 errorText_ = errorStream_.str();
6927 MUTEX_UNLOCK( &stream_.mutex );
6928 error( RtAudioError::SYSTEM_ERROR );
6929 return;
6930 }
6931
6932 if ( duplexPrerollBytes <= 0 ) {
6933 // Copy our buffer into the DS buffer
6934 CopyMemory( buffer, buffer1, bufferSize1 );
6935 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6936 }
6937 else {
6938 memset( buffer, 0, bufferSize1 );
6939 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6940 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6941 }
6942
6943 // Update our buffer offset and unlock sound buffer
6944 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6945 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6946 if ( FAILED( result ) ) {
6947 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6948 errorText_ = errorStream_.str();
6949 MUTEX_UNLOCK( &stream_.mutex );
6950 error( RtAudioError::SYSTEM_ERROR );
6951 return;
6952 }
6953 handle->bufferPointer[1] = nextReadPointer;
6954
6955 // No byte swapping necessary in DirectSound implementation.
6956
6957 // If necessary, convert 8-bit data from unsigned to signed.
6958 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6959 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6960
6961 // Do buffer conversion if necessary.
6962 if ( stream_.doConvertBuffer[1] )
6963 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6964 }
6965
6966 unlock:
6967 MUTEX_UNLOCK( &stream_.mutex );
6968 RtApi::tickStreamTime();
6969 }
6970
6971 // Definitions for utility functions and callbacks
6972 // specific to the DirectSound implementation.
6973
callbackHandler(void * ptr)6974 static unsigned __stdcall callbackHandler( void *ptr )
6975 {
6976 CallbackInfo *info = (CallbackInfo *) ptr;
6977 RtApiDs *object = (RtApiDs *) info->object;
6978 bool* isRunning = &info->isRunning;
6979
6980 while ( *isRunning == true ) {
6981 object->callbackEvent();
6982 }
6983
6984 _endthreadex( 0 );
6985 return 0;
6986 }
6987
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6988 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6989 LPCTSTR description,
6990 LPCTSTR /*module*/,
6991 LPVOID lpContext )
6992 {
6993 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6994 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6995
6996 HRESULT hr;
6997 bool validDevice = false;
6998 if ( probeInfo.isInput == true ) {
6999 DSCCAPS caps;
7000 LPDIRECTSOUNDCAPTURE object;
7001
7002 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7003 if ( hr != DS_OK ) return TRUE;
7004
7005 caps.dwSize = sizeof(caps);
7006 hr = object->GetCaps( &caps );
7007 if ( hr == DS_OK ) {
7008 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7009 validDevice = true;
7010 }
7011 object->Release();
7012 }
7013 else {
7014 DSCAPS caps;
7015 LPDIRECTSOUND object;
7016 hr = DirectSoundCreate( lpguid, &object, NULL );
7017 if ( hr != DS_OK ) return TRUE;
7018
7019 caps.dwSize = sizeof(caps);
7020 hr = object->GetCaps( &caps );
7021 if ( hr == DS_OK ) {
7022 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7023 validDevice = true;
7024 }
7025 object->Release();
7026 }
7027
7028 // If good device, then save its name and guid.
7029 std::string name = convertCharPointerToStdString( description );
7030 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7031 if ( lpguid == NULL )
7032 name = "Default Device";
7033 if ( validDevice ) {
7034 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7035 if ( dsDevices[i].name == name ) {
7036 dsDevices[i].found = true;
7037 if ( probeInfo.isInput ) {
7038 dsDevices[i].id[1] = lpguid;
7039 dsDevices[i].validId[1] = true;
7040 }
7041 else {
7042 dsDevices[i].id[0] = lpguid;
7043 dsDevices[i].validId[0] = true;
7044 }
7045 return TRUE;
7046 }
7047 }
7048
7049 DsDevice device;
7050 device.name = name;
7051 device.found = true;
7052 if ( probeInfo.isInput ) {
7053 device.id[1] = lpguid;
7054 device.validId[1] = true;
7055 }
7056 else {
7057 device.id[0] = lpguid;
7058 device.validId[0] = true;
7059 }
7060 dsDevices.push_back( device );
7061 }
7062
7063 return TRUE;
7064 }
7065
getErrorString(int code)7066 static const char* getErrorString( int code )
7067 {
7068 switch ( code ) {
7069
7070 case DSERR_ALLOCATED:
7071 return "Already allocated";
7072
7073 case DSERR_CONTROLUNAVAIL:
7074 return "Control unavailable";
7075
7076 case DSERR_INVALIDPARAM:
7077 return "Invalid parameter";
7078
7079 case DSERR_INVALIDCALL:
7080 return "Invalid call";
7081
7082 case DSERR_GENERIC:
7083 return "Generic error";
7084
7085 case DSERR_PRIOLEVELNEEDED:
7086 return "Priority level needed";
7087
7088 case DSERR_OUTOFMEMORY:
7089 return "Out of memory";
7090
7091 case DSERR_BADFORMAT:
7092 return "The sample rate or the channel format is not supported";
7093
7094 case DSERR_UNSUPPORTED:
7095 return "Not supported";
7096
7097 case DSERR_NODRIVER:
7098 return "No driver";
7099
7100 case DSERR_ALREADYINITIALIZED:
7101 return "Already initialized";
7102
7103 case DSERR_NOAGGREGATION:
7104 return "No aggregation";
7105
7106 case DSERR_BUFFERLOST:
7107 return "Buffer lost";
7108
7109 case DSERR_OTHERAPPHASPRIO:
7110 return "Another application already has priority";
7111
7112 case DSERR_UNINITIALIZED:
7113 return "Uninitialized";
7114
7115 default:
7116 return "DirectSound unknown error";
7117 }
7118 }
7119 //******************** End of __WINDOWS_DS__ *********************//
7120 #endif
7121
7122
7123 #if defined(__LINUX_ALSA__)
7124
7125 #include <alsa/asoundlib.h>
7126 #include <unistd.h>
7127
7128 // A structure to hold various information related to the ALSA API
7129 // implementation.
struct AlsaHandle {
  snd_pcm_t *handles[2];      // PCM handles: [0] = playback, [1] = capture
  bool synchronized;          // true when the two PCM handles are linked for synchronous start
  bool xrun[2];               // under/overrun flags: [0] = playback, [1] = capture
  pthread_cond_t runnable_cv; // signals the callback thread that the stream may run
  bool runnable;              // predicate associated with runnable_cv

  // NOTE(review): runnable_cv is not initialized here — presumably
  // pthread_cond_init() is called by the stream-open code; verify.
  AlsaHandle()
    :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
};
7140
7141 static void *alsaCallbackHandler( void * ptr );
7142
RtApiAlsa()7143 RtApiAlsa :: RtApiAlsa()
7144 {
7145 // Nothing to do here.
7146 }
7147
~RtApiAlsa()7148 RtApiAlsa :: ~RtApiAlsa()
7149 {
7150 if ( stream_.state != STREAM_CLOSED ) closeStream();
7151 }
7152
getDeviceCount(void)7153 unsigned int RtApiAlsa :: getDeviceCount( void )
7154 {
7155 unsigned nDevices = 0;
7156 int result, subdevice, card;
7157 char name[64];
7158 snd_ctl_t *handle = 0;
7159
7160 // Count cards and devices
7161 card = -1;
7162 snd_card_next( &card );
7163 while ( card >= 0 ) {
7164 sprintf( name, "hw:%d", card );
7165 result = snd_ctl_open( &handle, name, 0 );
7166 if ( result < 0 ) {
7167 handle = 0;
7168 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7169 errorText_ = errorStream_.str();
7170 error( RtAudioError::WARNING );
7171 goto nextcard;
7172 }
7173 subdevice = -1;
7174 while( 1 ) {
7175 result = snd_ctl_pcm_next_device( handle, &subdevice );
7176 if ( result < 0 ) {
7177 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7178 errorText_ = errorStream_.str();
7179 error( RtAudioError::WARNING );
7180 break;
7181 }
7182 if ( subdevice < 0 )
7183 break;
7184 nDevices++;
7185 }
7186 nextcard:
7187 if ( handle )
7188 snd_ctl_close( handle );
7189 snd_card_next( &card );
7190 }
7191
7192 result = snd_ctl_open( &handle, "default", 0 );
7193 if (result == 0) {
7194 nDevices++;
7195 snd_ctl_close( handle );
7196 }
7197
7198 return nDevices;
7199 }
7200
getDeviceInfo(unsigned int device)7201 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7202 {
7203 RtAudio::DeviceInfo info;
7204 info.probed = false;
7205
7206 unsigned nDevices = 0;
7207 int result, subdevice, card;
7208 char name[64];
7209 snd_ctl_t *chandle = 0;
7210
7211 // Count cards and devices
7212 card = -1;
7213 subdevice = -1;
7214 snd_card_next( &card );
7215 while ( card >= 0 ) {
7216 sprintf( name, "hw:%d", card );
7217 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7218 if ( result < 0 ) {
7219 chandle = 0;
7220 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7221 errorText_ = errorStream_.str();
7222 error( RtAudioError::WARNING );
7223 goto nextcard;
7224 }
7225 subdevice = -1;
7226 while( 1 ) {
7227 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7228 if ( result < 0 ) {
7229 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7230 errorText_ = errorStream_.str();
7231 error( RtAudioError::WARNING );
7232 break;
7233 }
7234 if ( subdevice < 0 ) break;
7235 if ( nDevices == device ) {
7236 sprintf( name, "hw:%d,%d", card, subdevice );
7237 goto foundDevice;
7238 }
7239 nDevices++;
7240 }
7241 nextcard:
7242 if ( chandle )
7243 snd_ctl_close( chandle );
7244 snd_card_next( &card );
7245 }
7246
7247 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7248 if ( result == 0 ) {
7249 if ( nDevices == device ) {
7250 strcpy( name, "default" );
7251 goto foundDevice;
7252 }
7253 nDevices++;
7254 }
7255
7256 if ( nDevices == 0 ) {
7257 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7258 error( RtAudioError::INVALID_USE );
7259 return info;
7260 }
7261
7262 if ( device >= nDevices ) {
7263 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7264 error( RtAudioError::INVALID_USE );
7265 return info;
7266 }
7267
7268 foundDevice:
7269
7270 // If a stream is already open, we cannot probe the stream devices.
7271 // Thus, use the saved results.
7272 if ( stream_.state != STREAM_CLOSED &&
7273 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7274 snd_ctl_close( chandle );
7275 if ( device >= devices_.size() ) {
7276 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7277 error( RtAudioError::WARNING );
7278 return info;
7279 }
7280 return devices_[ device ];
7281 }
7282
7283 int openMode = SND_PCM_ASYNC;
7284 snd_pcm_stream_t stream;
7285 snd_pcm_info_t *pcminfo;
7286 snd_pcm_info_alloca( &pcminfo );
7287 snd_pcm_t *phandle;
7288 snd_pcm_hw_params_t *params;
7289 snd_pcm_hw_params_alloca( ¶ms );
7290
7291 // First try for playback unless default device (which has subdev -1)
7292 stream = SND_PCM_STREAM_PLAYBACK;
7293 snd_pcm_info_set_stream( pcminfo, stream );
7294 if ( subdevice != -1 ) {
7295 snd_pcm_info_set_device( pcminfo, subdevice );
7296 snd_pcm_info_set_subdevice( pcminfo, 0 );
7297
7298 result = snd_ctl_pcm_info( chandle, pcminfo );
7299 if ( result < 0 ) {
7300 // Device probably doesn't support playback.
7301 goto captureProbe;
7302 }
7303 }
7304
7305 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7306 if ( result < 0 ) {
7307 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7308 errorText_ = errorStream_.str();
7309 error( RtAudioError::WARNING );
7310 goto captureProbe;
7311 }
7312
7313 // The device is open ... fill the parameter structure.
7314 result = snd_pcm_hw_params_any( phandle, params );
7315 if ( result < 0 ) {
7316 snd_pcm_close( phandle );
7317 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7318 errorText_ = errorStream_.str();
7319 error( RtAudioError::WARNING );
7320 goto captureProbe;
7321 }
7322
7323 // Get output channel information.
7324 unsigned int value;
7325 result = snd_pcm_hw_params_get_channels_max( params, &value );
7326 if ( result < 0 ) {
7327 snd_pcm_close( phandle );
7328 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7329 errorText_ = errorStream_.str();
7330 error( RtAudioError::WARNING );
7331 goto captureProbe;
7332 }
7333 info.outputChannels = value;
7334 snd_pcm_close( phandle );
7335
7336 captureProbe:
7337 stream = SND_PCM_STREAM_CAPTURE;
7338 snd_pcm_info_set_stream( pcminfo, stream );
7339
7340 // Now try for capture unless default device (with subdev = -1)
7341 if ( subdevice != -1 ) {
7342 result = snd_ctl_pcm_info( chandle, pcminfo );
7343 snd_ctl_close( chandle );
7344 if ( result < 0 ) {
7345 // Device probably doesn't support capture.
7346 if ( info.outputChannels == 0 ) return info;
7347 goto probeParameters;
7348 }
7349 }
7350 else
7351 snd_ctl_close( chandle );
7352
7353 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7354 if ( result < 0 ) {
7355 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7356 errorText_ = errorStream_.str();
7357 error( RtAudioError::WARNING );
7358 if ( info.outputChannels == 0 ) return info;
7359 goto probeParameters;
7360 }
7361
7362 // The device is open ... fill the parameter structure.
7363 result = snd_pcm_hw_params_any( phandle, params );
7364 if ( result < 0 ) {
7365 snd_pcm_close( phandle );
7366 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7367 errorText_ = errorStream_.str();
7368 error( RtAudioError::WARNING );
7369 if ( info.outputChannels == 0 ) return info;
7370 goto probeParameters;
7371 }
7372
7373 result = snd_pcm_hw_params_get_channels_max( params, &value );
7374 if ( result < 0 ) {
7375 snd_pcm_close( phandle );
7376 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7377 errorText_ = errorStream_.str();
7378 error( RtAudioError::WARNING );
7379 if ( info.outputChannels == 0 ) return info;
7380 goto probeParameters;
7381 }
7382 info.inputChannels = value;
7383 snd_pcm_close( phandle );
7384
7385 // If device opens for both playback and capture, we determine the channels.
7386 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7387 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7388
7389 // ALSA doesn't provide default devices so we'll use the first available one.
7390 if ( device == 0 && info.outputChannels > 0 )
7391 info.isDefaultOutput = true;
7392 if ( device == 0 && info.inputChannels > 0 )
7393 info.isDefaultInput = true;
7394
7395 probeParameters:
7396 // At this point, we just need to figure out the supported data
7397 // formats and sample rates. We'll proceed by opening the device in
7398 // the direction with the maximum number of channels, or playback if
7399 // they are equal. This might limit our sample rate options, but so
7400 // be it.
7401
7402 if ( info.outputChannels >= info.inputChannels )
7403 stream = SND_PCM_STREAM_PLAYBACK;
7404 else
7405 stream = SND_PCM_STREAM_CAPTURE;
7406 snd_pcm_info_set_stream( pcminfo, stream );
7407
7408 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7409 if ( result < 0 ) {
7410 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7411 errorText_ = errorStream_.str();
7412 error( RtAudioError::WARNING );
7413 return info;
7414 }
7415
7416 // The device is open ... fill the parameter structure.
7417 result = snd_pcm_hw_params_any( phandle, params );
7418 if ( result < 0 ) {
7419 snd_pcm_close( phandle );
7420 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7421 errorText_ = errorStream_.str();
7422 error( RtAudioError::WARNING );
7423 return info;
7424 }
7425
7426 // Test our discrete set of sample rate values.
7427 info.sampleRates.clear();
7428 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7429 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7430 info.sampleRates.push_back( SAMPLE_RATES[i] );
7431
7432 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7433 info.preferredSampleRate = SAMPLE_RATES[i];
7434 }
7435 }
7436 if ( info.sampleRates.size() == 0 ) {
7437 snd_pcm_close( phandle );
7438 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7439 errorText_ = errorStream_.str();
7440 error( RtAudioError::WARNING );
7441 return info;
7442 }
7443
7444 // Probe the supported data formats ... we don't care about endian-ness just yet
7445 snd_pcm_format_t format;
7446 info.nativeFormats = 0;
7447 format = SND_PCM_FORMAT_S8;
7448 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7449 info.nativeFormats |= RTAUDIO_SINT8;
7450 format = SND_PCM_FORMAT_S16;
7451 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7452 info.nativeFormats |= RTAUDIO_SINT16;
7453 format = SND_PCM_FORMAT_S24;
7454 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7455 info.nativeFormats |= RTAUDIO_SINT24;
7456 format = SND_PCM_FORMAT_S32;
7457 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7458 info.nativeFormats |= RTAUDIO_SINT32;
7459 format = SND_PCM_FORMAT_FLOAT;
7460 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7461 info.nativeFormats |= RTAUDIO_FLOAT32;
7462 format = SND_PCM_FORMAT_FLOAT64;
7463 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7464 info.nativeFormats |= RTAUDIO_FLOAT64;
7465
7466 // Check that we have at least one supported format
7467 if ( info.nativeFormats == 0 ) {
7468 snd_pcm_close( phandle );
7469 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7470 errorText_ = errorStream_.str();
7471 error( RtAudioError::WARNING );
7472 return info;
7473 }
7474
7475 // Get the device name
7476 char *cardname;
7477 result = snd_card_get_name( card, &cardname );
7478 if ( result >= 0 ) {
7479 sprintf( name, "hw:%s,%d", cardname, subdevice );
7480 free( cardname );
7481 }
7482 info.name = name;
7483
7484 // That's all ... close the device and return
7485 snd_pcm_close( phandle );
7486 info.probed = true;
7487 return info;
7488 }
7489
saveDeviceInfo(void)7490 void RtApiAlsa :: saveDeviceInfo( void )
7491 {
7492 devices_.clear();
7493
7494 unsigned int nDevices = getDeviceCount();
7495 devices_.resize( nDevices );
7496 for ( unsigned int i=0; i<nDevices; i++ )
7497 devices_[i] = getDeviceInfo( i );
7498 }
7499
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7500 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7501 unsigned int firstChannel, unsigned int sampleRate,
7502 RtAudioFormat format, unsigned int *bufferSize,
7503 RtAudio::StreamOptions *options )
7504
7505 {
7506 #if defined(__RTAUDIO_DEBUG__)
7507 snd_output_t *out;
7508 snd_output_stdio_attach(&out, stderr, 0);
7509 #endif
7510
7511 // I'm not using the "plug" interface ... too much inconsistent behavior.
7512
7513 unsigned nDevices = 0;
7514 int result, subdevice, card;
7515 char name[64];
7516 snd_ctl_t *chandle;
7517
7518 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7519 snprintf(name, sizeof(name), "%s", "default");
7520 else {
7521 // Count cards and devices
7522 card = -1;
7523 snd_card_next( &card );
7524 while ( card >= 0 ) {
7525 sprintf( name, "hw:%d", card );
7526 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7527 if ( result < 0 ) {
7528 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7529 errorText_ = errorStream_.str();
7530 return FAILURE;
7531 }
7532 subdevice = -1;
7533 while( 1 ) {
7534 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7535 if ( result < 0 ) break;
7536 if ( subdevice < 0 ) break;
7537 if ( nDevices == device ) {
7538 sprintf( name, "hw:%d,%d", card, subdevice );
7539 snd_ctl_close( chandle );
7540 goto foundDevice;
7541 }
7542 nDevices++;
7543 }
7544 snd_ctl_close( chandle );
7545 snd_card_next( &card );
7546 }
7547
7548 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7549 if ( result == 0 ) {
7550 if ( nDevices == device ) {
7551 strcpy( name, "default" );
7552 snd_ctl_close( chandle );
7553 goto foundDevice;
7554 }
7555 nDevices++;
7556 }
7557 snd_ctl_close( chandle );
7558
7559 if ( nDevices == 0 ) {
7560 // This should not happen because a check is made before this function is called.
7561 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7562 return FAILURE;
7563 }
7564
7565 if ( device >= nDevices ) {
7566 // This should not happen because a check is made before this function is called.
7567 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7568 return FAILURE;
7569 }
7570 }
7571
7572 foundDevice:
7573
7574 // The getDeviceInfo() function will not work for a device that is
7575 // already open. Thus, we'll probe the system before opening a
7576 // stream and save the results for use by getDeviceInfo().
7577 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7578 this->saveDeviceInfo();
7579
7580 snd_pcm_stream_t stream;
7581 if ( mode == OUTPUT )
7582 stream = SND_PCM_STREAM_PLAYBACK;
7583 else
7584 stream = SND_PCM_STREAM_CAPTURE;
7585
7586 snd_pcm_t *phandle;
7587 int openMode = SND_PCM_ASYNC;
7588 result = snd_pcm_open( &phandle, name, stream, openMode );
7589 if ( result < 0 ) {
7590 if ( mode == OUTPUT )
7591 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7592 else
7593 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7594 errorText_ = errorStream_.str();
7595 return FAILURE;
7596 }
7597
7598 // Fill the parameter structure.
7599 snd_pcm_hw_params_t *hw_params;
7600 snd_pcm_hw_params_alloca( &hw_params );
7601 result = snd_pcm_hw_params_any( phandle, hw_params );
7602 if ( result < 0 ) {
7603 snd_pcm_close( phandle );
7604 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7605 errorText_ = errorStream_.str();
7606 return FAILURE;
7607 }
7608
7609 #if defined(__RTAUDIO_DEBUG__)
7610 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7611 snd_pcm_hw_params_dump( hw_params, out );
7612 #endif
7613
7614 // Set access ... check user preference.
7615 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7616 stream_.userInterleaved = false;
7617 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7618 if ( result < 0 ) {
7619 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7620 stream_.deviceInterleaved[mode] = true;
7621 }
7622 else
7623 stream_.deviceInterleaved[mode] = false;
7624 }
7625 else {
7626 stream_.userInterleaved = true;
7627 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7628 if ( result < 0 ) {
7629 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7630 stream_.deviceInterleaved[mode] = false;
7631 }
7632 else
7633 stream_.deviceInterleaved[mode] = true;
7634 }
7635
7636 if ( result < 0 ) {
7637 snd_pcm_close( phandle );
7638 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7639 errorText_ = errorStream_.str();
7640 return FAILURE;
7641 }
7642
7643 // Determine how to set the device format.
7644 stream_.userFormat = format;
7645 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7646
7647 if ( format == RTAUDIO_SINT8 )
7648 deviceFormat = SND_PCM_FORMAT_S8;
7649 else if ( format == RTAUDIO_SINT16 )
7650 deviceFormat = SND_PCM_FORMAT_S16;
7651 else if ( format == RTAUDIO_SINT24 )
7652 deviceFormat = SND_PCM_FORMAT_S24;
7653 else if ( format == RTAUDIO_SINT32 )
7654 deviceFormat = SND_PCM_FORMAT_S32;
7655 else if ( format == RTAUDIO_FLOAT32 )
7656 deviceFormat = SND_PCM_FORMAT_FLOAT;
7657 else if ( format == RTAUDIO_FLOAT64 )
7658 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7659
7660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7661 stream_.deviceFormat[mode] = format;
7662 goto setFormat;
7663 }
7664
7665 // The user requested format is not natively supported by the device.
7666 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7667 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7668 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7669 goto setFormat;
7670 }
7671
7672 deviceFormat = SND_PCM_FORMAT_FLOAT;
7673 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7674 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7675 goto setFormat;
7676 }
7677
7678 deviceFormat = SND_PCM_FORMAT_S32;
7679 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7680 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7681 goto setFormat;
7682 }
7683
7684 deviceFormat = SND_PCM_FORMAT_S24;
7685 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7686 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7687 goto setFormat;
7688 }
7689
7690 deviceFormat = SND_PCM_FORMAT_S16;
7691 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7692 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7693 goto setFormat;
7694 }
7695
7696 deviceFormat = SND_PCM_FORMAT_S8;
7697 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7698 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7699 goto setFormat;
7700 }
7701
7702 // If we get here, no supported format was found.
7703 snd_pcm_close( phandle );
7704 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7705 errorText_ = errorStream_.str();
7706 return FAILURE;
7707
7708 setFormat:
7709 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7710 if ( result < 0 ) {
7711 snd_pcm_close( phandle );
7712 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7713 errorText_ = errorStream_.str();
7714 return FAILURE;
7715 }
7716
  // Determine whether byte-swapping is necessary.
7718 stream_.doByteSwap[mode] = false;
7719 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7720 result = snd_pcm_format_cpu_endian( deviceFormat );
7721 if ( result == 0 )
7722 stream_.doByteSwap[mode] = true;
7723 else if (result < 0) {
7724 snd_pcm_close( phandle );
7725 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7726 errorText_ = errorStream_.str();
7727 return FAILURE;
7728 }
7729 }
7730
7731 // Set the sample rate.
7732 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7733 if ( result < 0 ) {
7734 snd_pcm_close( phandle );
7735 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7736 errorText_ = errorStream_.str();
7737 return FAILURE;
7738 }
7739
7740 // Determine the number of channels for this device. We support a possible
7741 // minimum device channel number > than the value requested by the user.
7742 stream_.nUserChannels[mode] = channels;
7743 unsigned int value;
7744 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7745 unsigned int deviceChannels = value;
7746 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7747 snd_pcm_close( phandle );
7748 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7749 errorText_ = errorStream_.str();
7750 return FAILURE;
7751 }
7752
7753 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7754 if ( result < 0 ) {
7755 snd_pcm_close( phandle );
7756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
7758 return FAILURE;
7759 }
7760 deviceChannels = value;
7761 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7762 stream_.nDeviceChannels[mode] = deviceChannels;
7763
7764 // Set the device channels.
7765 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7766 if ( result < 0 ) {
7767 snd_pcm_close( phandle );
7768 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7769 errorText_ = errorStream_.str();
7770 return FAILURE;
7771 }
7772
7773 // Set the buffer (or period) size.
7774 int dir = 0;
7775 snd_pcm_uframes_t periodSize = *bufferSize;
7776 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7777 if ( result < 0 ) {
7778 snd_pcm_close( phandle );
7779 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
7781 return FAILURE;
7782 }
7783 *bufferSize = periodSize;
7784
7785 // Set the buffer number, which in ALSA is referred to as the "period".
7786 unsigned int periods = 0;
7787 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7788 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7789 if ( periods < 2 ) periods = 4; // a fairly safe default value
7790 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7791 if ( result < 0 ) {
7792 snd_pcm_close( phandle );
7793 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7794 errorText_ = errorStream_.str();
7795 return FAILURE;
7796 }
7797
7798 // If attempting to setup a duplex stream, the bufferSize parameter
7799 // MUST be the same in both directions!
7800 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7801 snd_pcm_close( phandle );
7802 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7803 errorText_ = errorStream_.str();
7804 return FAILURE;
7805 }
7806
7807 stream_.bufferSize = *bufferSize;
7808
7809 // Install the hardware configuration
7810 result = snd_pcm_hw_params( phandle, hw_params );
7811 if ( result < 0 ) {
7812 snd_pcm_close( phandle );
7813 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7814 errorText_ = errorStream_.str();
7815 return FAILURE;
7816 }
7817
7818 #if defined(__RTAUDIO_DEBUG__)
7819 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7820 snd_pcm_hw_params_dump( hw_params, out );
7821 #endif
7822
7823 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7824 snd_pcm_sw_params_t *sw_params = NULL;
7825 snd_pcm_sw_params_alloca( &sw_params );
7826 snd_pcm_sw_params_current( phandle, sw_params );
7827 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7828 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7829 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7830
7831 // The following two settings were suggested by Theo Veenker
7832 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7833 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7834
7835 // here are two options for a fix
7836 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7837 snd_pcm_uframes_t val;
7838 snd_pcm_sw_params_get_boundary( sw_params, &val );
7839 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7840
7841 result = snd_pcm_sw_params( phandle, sw_params );
7842 if ( result < 0 ) {
7843 snd_pcm_close( phandle );
7844 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7845 errorText_ = errorStream_.str();
7846 return FAILURE;
7847 }
7848
7849 #if defined(__RTAUDIO_DEBUG__)
7850 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7851 snd_pcm_sw_params_dump( sw_params, out );
7852 #endif
7853
7854 // Set flags for buffer conversion
7855 stream_.doConvertBuffer[mode] = false;
7856 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7857 stream_.doConvertBuffer[mode] = true;
7858 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7859 stream_.doConvertBuffer[mode] = true;
7860 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7861 stream_.nUserChannels[mode] > 1 )
7862 stream_.doConvertBuffer[mode] = true;
7863
7864 // Allocate the ApiHandle if necessary and then save.
7865 AlsaHandle *apiInfo = 0;
7866 if ( stream_.apiHandle == 0 ) {
7867 try {
7868 apiInfo = (AlsaHandle *) new AlsaHandle;
7869 }
7870 catch ( std::bad_alloc& ) {
7871 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7872 goto error;
7873 }
7874
7875 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7876 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7877 goto error;
7878 }
7879
7880 stream_.apiHandle = (void *) apiInfo;
7881 apiInfo->handles[0] = 0;
7882 apiInfo->handles[1] = 0;
7883 }
7884 else {
7885 apiInfo = (AlsaHandle *) stream_.apiHandle;
7886 }
7887 apiInfo->handles[mode] = phandle;
7888 phandle = 0;
7889
7890 // Allocate necessary internal buffers.
7891 unsigned long bufferBytes;
7892 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7893 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7894 if ( stream_.userBuffer[mode] == NULL ) {
7895 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7896 goto error;
7897 }
7898
7899 if ( stream_.doConvertBuffer[mode] ) {
7900
7901 bool makeBuffer = true;
7902 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7903 if ( mode == INPUT ) {
7904 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7905 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7906 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7907 }
7908 }
7909
7910 if ( makeBuffer ) {
7911 bufferBytes *= *bufferSize;
7912 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7913 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7914 if ( stream_.deviceBuffer == NULL ) {
7915 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7916 goto error;
7917 }
7918 }
7919 }
7920
7921 stream_.sampleRate = sampleRate;
7922 stream_.nBuffers = periods;
7923 stream_.device[mode] = device;
7924 stream_.state = STREAM_STOPPED;
7925
7926 // Setup the buffer conversion information structure.
7927 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7928
7929 // Setup thread if necessary.
7930 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7931 // We had already set up an output stream.
7932 stream_.mode = DUPLEX;
7933 // Link the streams if possible.
7934 apiInfo->synchronized = false;
7935 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7936 apiInfo->synchronized = true;
7937 else {
7938 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7939 error( RtAudioError::WARNING );
7940 }
7941 }
7942 else {
7943 stream_.mode = mode;
7944
7945 // Setup callback thread.
7946 stream_.callbackInfo.object = (void *) this;
7947
7948 // Set the thread attributes for joinable and realtime scheduling
7949 // priority (optional). The higher priority will only take affect
7950 // if the program is run as root or suid. Note, under Linux
7951 // processes with CAP_SYS_NICE privilege, a user can change
7952 // scheduling policy and priority (thus need not be root). See
7953 // POSIX "capabilities".
7954 pthread_attr_t attr;
7955 pthread_attr_init( &attr );
7956 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7957 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7958 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7959 stream_.callbackInfo.doRealtime = true;
7960 struct sched_param param;
7961 int priority = options->priority;
7962 int min = sched_get_priority_min( SCHED_RR );
7963 int max = sched_get_priority_max( SCHED_RR );
7964 if ( priority < min ) priority = min;
7965 else if ( priority > max ) priority = max;
7966 param.sched_priority = priority;
7967
7968 // Set the policy BEFORE the priority. Otherwise it fails.
7969 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7970 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7971 // This is definitely required. Otherwise it fails.
7972 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7973 pthread_attr_setschedparam(&attr, ¶m);
7974 }
7975 else
7976 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7977 #else
7978 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7979 #endif
7980
7981 stream_.callbackInfo.isRunning = true;
7982 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7983 pthread_attr_destroy( &attr );
7984 if ( result ) {
7985 // Failed. Try instead with default attributes.
7986 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7987 if ( result ) {
7988 stream_.callbackInfo.isRunning = false;
7989 errorText_ = "RtApiAlsa::error creating callback thread!";
7990 goto error;
7991 }
7992 }
7993 }
7994
7995 return SUCCESS;
7996
7997 error:
7998 if ( apiInfo ) {
7999 pthread_cond_destroy( &apiInfo->runnable_cv );
8000 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8001 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8002 delete apiInfo;
8003 stream_.apiHandle = 0;
8004 }
8005
8006 if ( phandle) snd_pcm_close( phandle );
8007
8008 for ( int i=0; i<2; i++ ) {
8009 if ( stream_.userBuffer[i] ) {
8010 free( stream_.userBuffer[i] );
8011 stream_.userBuffer[i] = 0;
8012 }
8013 }
8014
8015 if ( stream_.deviceBuffer ) {
8016 free( stream_.deviceBuffer );
8017 stream_.deviceBuffer = 0;
8018 }
8019
8020 stream_.state = STREAM_CLOSED;
8021 return FAILURE;
8022 }
8023
void RtApiAlsa :: closeStream()
{
  // Tear down an open stream: wake and join the callback thread, stop and
  // close the pcm handle(s), then release all per-stream buffers and state.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false;  // ask the callback thread to exit its loop
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    // The callback thread may be blocked on runnable_cv; wake it so it can
    // observe isRunning == false and terminate.
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    // Discard any pending samples in each open direction (0 = playback,
    // 1 = capture) before closing.
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );
  }

  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Free the user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
8073
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.
  // On success the stream is marked RUNNING and the callback thread is
  // released; on failure a SYSTEM_ERROR is raised after unlocking.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // The input handle is prepared separately only when the two directions
  // are not linked (synchronized) at the ALSA level.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Wake the callback thread even on failure; it re-checks stream_.state
  // before doing any work.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
8130
stopStream()8131 void RtApiAlsa :: stopStream()
8132 {
8133 verifyStream();
8134 if ( stream_.state == STREAM_STOPPED ) {
8135 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8136 error( RtAudioError::WARNING );
8137 return;
8138 }
8139
8140 stream_.state = STREAM_STOPPED;
8141 MUTEX_LOCK( &stream_.mutex );
8142
8143 int result = 0;
8144 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8145 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8146 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8147 if ( apiInfo->synchronized )
8148 result = snd_pcm_drop( handle[0] );
8149 else
8150 result = snd_pcm_drain( handle[0] );
8151 if ( result < 0 ) {
8152 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8153 errorText_ = errorStream_.str();
8154 goto unlock;
8155 }
8156 }
8157
8158 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8159 result = snd_pcm_drop( handle[1] );
8160 if ( result < 0 ) {
8161 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8162 errorText_ = errorStream_.str();
8163 goto unlock;
8164 }
8165 }
8166
8167 unlock:
8168 apiInfo->runnable = false; // fixes high CPU usage when stopped
8169 MUTEX_UNLOCK( &stream_.mutex );
8170
8171 if ( result >= 0 ) return;
8172 error( RtAudioError::SYSTEM_ERROR );
8173 }
8174
abortStream()8175 void RtApiAlsa :: abortStream()
8176 {
8177 verifyStream();
8178 if ( stream_.state == STREAM_STOPPED ) {
8179 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8180 error( RtAudioError::WARNING );
8181 return;
8182 }
8183
8184 stream_.state = STREAM_STOPPED;
8185 MUTEX_LOCK( &stream_.mutex );
8186
8187 int result = 0;
8188 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8189 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8190 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8191 result = snd_pcm_drop( handle[0] );
8192 if ( result < 0 ) {
8193 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8194 errorText_ = errorStream_.str();
8195 goto unlock;
8196 }
8197 }
8198
8199 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8200 result = snd_pcm_drop( handle[1] );
8201 if ( result < 0 ) {
8202 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8203 errorText_ = errorStream_.str();
8204 goto unlock;
8205 }
8206 }
8207
8208 unlock:
8209 apiInfo->runnable = false; // fixes high CPU usage when stopped
8210 MUTEX_UNLOCK( &stream_.mutex );
8211
8212 if ( result >= 0 ) return;
8213 error( RtAudioError::SYSTEM_ERROR );
8214 }
8215
void RtApiAlsa :: callbackEvent()
{
  // One cycle of the callback thread: wait while the stream is stopped,
  // invoke the user callback (reporting any xruns seen since the last
  // cycle), then read captured samples and/or write playback samples to
  // the pcm device(s).  Xruns are recovered via snd_pcm_prepare and
  // reported as warnings rather than fatal errors.
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream() (or closeStream()) signals runnable_cv.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Build the status flags from xruns recorded by previous cycles, then
  // hand the user buffers to the application callback.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    // Callback requested an immediate abort (discard pending samples).
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: capture into the device buffer when format/channel
    // conversion is needed, otherwise straight into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occurred.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Still attempt the output half of a duplex cycle.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occurred.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next callback and re-prepare the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
8403
alsaCallbackHandler(void * ptr)8404 static void *alsaCallbackHandler( void *ptr )
8405 {
8406 CallbackInfo *info = (CallbackInfo *) ptr;
8407 RtApiAlsa *object = (RtApiAlsa *) info->object;
8408 bool *isRunning = &info->isRunning;
8409
8410 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8411 if ( info->doRealtime ) {
8412 std::cerr << "RtAudio alsa: " <<
8413 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8414 "running realtime scheduling" << std::endl;
8415 }
8416 #endif
8417
8418 while ( *isRunning == true ) {
8419 pthread_testcancel();
8420 object->callbackEvent();
8421 }
8422
8423 pthread_exit( NULL );
8424 }
8425
8426 //******************** End of __LINUX_ALSA__ *********************//
8427 #endif
8428
8429 #if defined(__LINUX_PULSE__)
8430
8431 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8432 // and Tristan Matthews.
8433
8434 #include <pulse/error.h>
8435 #include <pulse/simple.h>
8436 #include <cstdio>
8437
// Sample rates advertised by RtApiPulse::getDeviceInfo(); zero-terminated.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Pairing of an RtAudio sample format with the PulseAudio sample format
// used when opening the pa_simple streams.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats this backend can open; terminated by {0, PA_SAMPLE_INVALID}.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8451
// Per-stream state for the PulseAudio backend: the simple-API playback and
// record connections, the callback thread, and its wake-up condition.
struct PulseAudioHandle {
  pa_simple *s_play;           // playback connection (0 when output not open)
  pa_simple *s_rec;            // record connection (0 when input not open)
  pthread_t thread;            // callback thread servicing the stream
  pthread_cond_t runnable_cv;  // signaled to wake a stopped callback thread
  bool runnable;               // predicate guarded by the stream mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8460
~RtApiPulse()8461 RtApiPulse::~RtApiPulse()
8462 {
8463 if ( stream_.state != STREAM_CLOSED )
8464 closeStream();
8465 }
8466
getDeviceCount(void)8467 unsigned int RtApiPulse::getDeviceCount( void )
8468 {
8469 return 1;
8470 }
8471
getDeviceInfo(unsigned int)8472 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8473 {
8474 RtAudio::DeviceInfo info;
8475 info.probed = true;
8476 info.name = "PulseAudio";
8477 info.outputChannels = 2;
8478 info.inputChannels = 2;
8479 info.duplexChannels = 2;
8480 info.isDefaultOutput = true;
8481 info.isDefaultInput = true;
8482
8483 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8484 info.sampleRates.push_back( *sr );
8485
8486 info.preferredSampleRate = 48000;
8487 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8488
8489 return info;
8490 }
8491
pulseaudio_callback(void * user)8492 static void *pulseaudio_callback( void * user )
8493 {
8494 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8495 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8496 volatile bool *isRunning = &cbi->isRunning;
8497
8498 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8499 if (cbi->doRealtime) {
8500 std::cerr << "RtAudio pulse: " <<
8501 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8502 "running realtime scheduling" << std::endl;
8503 }
8504 #endif
8505
8506 while ( *isRunning ) {
8507 pthread_testcancel();
8508 context->callbackEvent();
8509 }
8510
8511 pthread_exit( NULL );
8512 }
8513
void RtApiPulse::closeStream( void )
{
  // Tear down the PulseAudio stream: stop and join the callback thread,
  // free both simple-API connections, and release the user buffers.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  stream_.callbackInfo.isRunning = false;  // ask the callback thread to exit
  if ( pah ) {
    MUTEX_LOCK( &stream_.mutex );
    if ( stream_.state == STREAM_STOPPED ) {
      // The callback thread may be blocked on runnable_cv; wake it so it
      // can observe isRunning == false and terminate.
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    if ( pah->s_play ) {
      // Discard anything still queued for playback, then close.
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  // Release the user-side buffers (0 = playback, 1 = capture).
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
8552
void RtApiPulse::callbackEvent( void )
{
  // One cycle of the callback thread: wait while the stream is stopped,
  // invoke the user callback, then write playback data to and/or read
  // capture data from the PulseAudio simple-API connections (both calls
  // block until the requested number of bytes is transferred).
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream() (or closeStream()) signals runnable_cv.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    // Callback requested an immediate abort.
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Choose the staging buffers: the device buffer when format/channel
  // conversion is required, otherwise the user buffers directly.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // The state might have changed while waiting on the mutex.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      // Convert user data to the device format before writing.
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write of one buffer of playback samples.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer of capture samples.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      // Convert captured data from the device format to the user format.
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  if ( doStopStream == 1 )
    stopStream();
}
8644
startStream(void)8645 void RtApiPulse::startStream( void )
8646 {
8647 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8648
8649 if ( stream_.state == STREAM_CLOSED ) {
8650 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8651 error( RtAudioError::INVALID_USE );
8652 return;
8653 }
8654 if ( stream_.state == STREAM_RUNNING ) {
8655 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8656 error( RtAudioError::WARNING );
8657 return;
8658 }
8659
8660 MUTEX_LOCK( &stream_.mutex );
8661
8662 #if defined( HAVE_GETTIMEOFDAY )
8663 gettimeofday( &stream_.lastTickTimestamp, NULL );
8664 #endif
8665
8666 stream_.state = STREAM_RUNNING;
8667
8668 pah->runnable = true;
8669 pthread_cond_signal( &pah->runnable_cv );
8670 MUTEX_UNLOCK( &stream_.mutex );
8671 }
8672
stopStream(void)8673 void RtApiPulse::stopStream( void )
8674 {
8675 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8676
8677 if ( stream_.state == STREAM_CLOSED ) {
8678 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8679 error( RtAudioError::INVALID_USE );
8680 return;
8681 }
8682 if ( stream_.state == STREAM_STOPPED ) {
8683 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8684 error( RtAudioError::WARNING );
8685 return;
8686 }
8687
8688 stream_.state = STREAM_STOPPED;
8689 MUTEX_LOCK( &stream_.mutex );
8690
8691 if ( pah && pah->s_play ) {
8692 int pa_error;
8693 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8694 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8695 pa_strerror( pa_error ) << ".";
8696 errorText_ = errorStream_.str();
8697 MUTEX_UNLOCK( &stream_.mutex );
8698 error( RtAudioError::SYSTEM_ERROR );
8699 return;
8700 }
8701 }
8702
8703 stream_.state = STREAM_STOPPED;
8704 MUTEX_UNLOCK( &stream_.mutex );
8705 }
8706
abortStream(void)8707 void RtApiPulse::abortStream( void )
8708 {
8709 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8710
8711 if ( stream_.state == STREAM_CLOSED ) {
8712 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8713 error( RtAudioError::INVALID_USE );
8714 return;
8715 }
8716 if ( stream_.state == STREAM_STOPPED ) {
8717 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8718 error( RtAudioError::WARNING );
8719 return;
8720 }
8721
8722 stream_.state = STREAM_STOPPED;
8723 MUTEX_LOCK( &stream_.mutex );
8724
8725 if ( pah && pah->s_play ) {
8726 int pa_error;
8727 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8728 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8729 pa_strerror( pa_error ) << ".";
8730 errorText_ = errorStream_.str();
8731 MUTEX_UNLOCK( &stream_.mutex );
8732 error( RtAudioError::SYSTEM_ERROR );
8733 return;
8734 }
8735 }
8736
8737 stream_.state = STREAM_STOPPED;
8738 MUTEX_UNLOCK( &stream_.mutex );
8739 }
8740
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8741 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8742 unsigned int channels, unsigned int firstChannel,
8743 unsigned int sampleRate, RtAudioFormat format,
8744 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8745 {
8746 PulseAudioHandle *pah = 0;
8747 unsigned long bufferBytes = 0;
8748 pa_sample_spec ss;
8749
8750 if ( device != 0 ) return false;
8751 if ( mode != INPUT && mode != OUTPUT ) return false;
8752 if ( channels != 1 && channels != 2 ) {
8753 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8754 return false;
8755 }
8756 ss.channels = channels;
8757
8758 if ( firstChannel != 0 ) return false;
8759
8760 bool sr_found = false;
8761 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8762 if ( sampleRate == *sr ) {
8763 sr_found = true;
8764 stream_.sampleRate = sampleRate;
8765 ss.rate = sampleRate;
8766 break;
8767 }
8768 }
8769 if ( !sr_found ) {
8770 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8771 return false;
8772 }
8773
8774 bool sf_found = 0;
8775 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8776 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8777 if ( format == sf->rtaudio_format ) {
8778 sf_found = true;
8779 stream_.userFormat = sf->rtaudio_format;
8780 stream_.deviceFormat[mode] = stream_.userFormat;
8781 ss.format = sf->pa_format;
8782 break;
8783 }
8784 }
8785 if ( !sf_found ) { // Use internal data format conversion.
8786 stream_.userFormat = format;
8787 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8788 ss.format = PA_SAMPLE_FLOAT32LE;
8789 }
8790
8791 // Set other stream parameters.
8792 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8793 else stream_.userInterleaved = true;
8794 stream_.deviceInterleaved[mode] = true;
8795 stream_.nBuffers = 1;
8796 stream_.doByteSwap[mode] = false;
8797 stream_.nUserChannels[mode] = channels;
8798 stream_.nDeviceChannels[mode] = channels + firstChannel;
8799 stream_.channelOffset[mode] = 0;
8800 std::string streamName = "RtAudio";
8801
8802 // Set flags for buffer conversion.
8803 stream_.doConvertBuffer[mode] = false;
8804 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8805 stream_.doConvertBuffer[mode] = true;
8806 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8807 stream_.doConvertBuffer[mode] = true;
8808
8809 // Allocate necessary internal buffers.
8810 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8811 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8812 if ( stream_.userBuffer[mode] == NULL ) {
8813 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8814 goto error;
8815 }
8816 stream_.bufferSize = *bufferSize;
8817
8818 if ( stream_.doConvertBuffer[mode] ) {
8819
8820 bool makeBuffer = true;
8821 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8822 if ( mode == INPUT ) {
8823 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8824 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8825 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8826 }
8827 }
8828
8829 if ( makeBuffer ) {
8830 bufferBytes *= *bufferSize;
8831 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8832 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8833 if ( stream_.deviceBuffer == NULL ) {
8834 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8835 goto error;
8836 }
8837 }
8838 }
8839
8840 stream_.device[mode] = device;
8841
8842 // Setup the buffer conversion information structure.
8843 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8844
8845 if ( !stream_.apiHandle ) {
8846 PulseAudioHandle *pah = new PulseAudioHandle;
8847 if ( !pah ) {
8848 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8849 goto error;
8850 }
8851
8852 stream_.apiHandle = pah;
8853 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8854 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8855 goto error;
8856 }
8857 }
8858 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8859
8860 int error;
8861 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8862 switch ( mode ) {
8863 case INPUT:
8864 pa_buffer_attr buffer_attr;
8865 buffer_attr.fragsize = bufferBytes;
8866 buffer_attr.maxlength = -1;
8867
8868 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8869 if ( !pah->s_rec ) {
8870 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8871 goto error;
8872 }
8873 break;
8874 case OUTPUT:
8875 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8876 if ( !pah->s_play ) {
8877 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8878 goto error;
8879 }
8880 break;
8881 default:
8882 goto error;
8883 }
8884
8885 if ( stream_.mode == UNINITIALIZED )
8886 stream_.mode = mode;
8887 else if ( stream_.mode == mode )
8888 goto error;
8889 else
8890 stream_.mode = DUPLEX;
8891
8892 if ( !stream_.callbackInfo.isRunning ) {
8893 stream_.callbackInfo.object = this;
8894
8895 stream_.state = STREAM_STOPPED;
8896 // Set the thread attributes for joinable and realtime scheduling
8897 // priority (optional). The higher priority will only take affect
8898 // if the program is run as root or suid. Note, under Linux
8899 // processes with CAP_SYS_NICE privilege, a user can change
8900 // scheduling policy and priority (thus need not be root). See
8901 // POSIX "capabilities".
8902 pthread_attr_t attr;
8903 pthread_attr_init( &attr );
8904 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8905 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8906 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8907 stream_.callbackInfo.doRealtime = true;
8908 struct sched_param param;
8909 int priority = options->priority;
8910 int min = sched_get_priority_min( SCHED_RR );
8911 int max = sched_get_priority_max( SCHED_RR );
8912 if ( priority < min ) priority = min;
8913 else if ( priority > max ) priority = max;
8914 param.sched_priority = priority;
8915
8916 // Set the policy BEFORE the priority. Otherwise it fails.
8917 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8918 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8919 // This is definitely required. Otherwise it fails.
8920 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8921 pthread_attr_setschedparam(&attr, ¶m);
8922 }
8923 else
8924 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8925 #else
8926 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8927 #endif
8928
8929 stream_.callbackInfo.isRunning = true;
8930 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8931 pthread_attr_destroy(&attr);
8932 if(result != 0) {
8933 // Failed. Try instead with default attributes.
8934 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8935 if(result != 0) {
8936 stream_.callbackInfo.isRunning = false;
8937 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8938 goto error;
8939 }
8940 }
8941 }
8942
8943 return SUCCESS;
8944
8945 error:
8946 if ( pah && stream_.callbackInfo.isRunning ) {
8947 pthread_cond_destroy( &pah->runnable_cv );
8948 delete pah;
8949 stream_.apiHandle = 0;
8950 }
8951
8952 for ( int i=0; i<2; i++ ) {
8953 if ( stream_.userBuffer[i] ) {
8954 free( stream_.userBuffer[i] );
8955 stream_.userBuffer[i] = 0;
8956 }
8957 }
8958
8959 if ( stream_.deviceBuffer ) {
8960 free( stream_.deviceBuffer );
8961 stream_.deviceBuffer = 0;
8962 }
8963
8964 stream_.state = STREAM_CLOSED;
8965 return FAILURE;
8966 }
8967
8968 //******************** End of __LINUX_PULSE__ *********************//
8969 #endif
8970
8971 #if defined(__LINUX_OSS__)
8972
8973 #include <unistd.h>
8974 #include <sys/ioctl.h>
8975 #include <unistd.h>
8976 #include <fcntl.h>
8977 #include <sys/soundcard.h>
8978 #include <errno.h>
8979 #include <math.h>
8980
8981 static void *ossCallbackHandler(void * ptr);
8982
8983 // A structure to hold various information related to the OSS API
8984 // implementation.
struct OssHandle {
  int id[2];    // device ids (playback = 0, record = 1)
  bool xrun[2]; // over/underrun flags per direction
  bool triggered;
  pthread_cond_t runnable;

  // Start with no open devices, no xruns and the stream untriggered.
  OssHandle()
    : triggered( false )
  {
    id[0] = id[1] = 0;
    xrun[0] = xrun[1] = false;
  }
};
8994
// Default constructor: no OSS-specific state to set up; all resources are
// created lazily in probeDeviceOpen().
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
8999
~RtApiOss()9000 RtApiOss :: ~RtApiOss()
9001 {
9002 if ( stream_.state != STREAM_CLOSED ) closeStream();
9003 }
9004
getDeviceCount(void)9005 unsigned int RtApiOss :: getDeviceCount( void )
9006 {
9007 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9008 if ( mixerfd == -1 ) {
9009 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9010 error( RtAudioError::WARNING );
9011 return 0;
9012 }
9013
9014 oss_sysinfo sysinfo;
9015 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9016 close( mixerfd );
9017 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9018 error( RtAudioError::WARNING );
9019 return 0;
9020 }
9021
9022 close( mixerfd );
9023 return sysinfo.numaudios;
9024 }
9025
getDeviceInfo(unsigned int device)9026 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9027 {
9028 RtAudio::DeviceInfo info;
9029 info.probed = false;
9030
9031 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9032 if ( mixerfd == -1 ) {
9033 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9034 error( RtAudioError::WARNING );
9035 return info;
9036 }
9037
9038 oss_sysinfo sysinfo;
9039 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9040 if ( result == -1 ) {
9041 close( mixerfd );
9042 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9043 error( RtAudioError::WARNING );
9044 return info;
9045 }
9046
9047 unsigned nDevices = sysinfo.numaudios;
9048 if ( nDevices == 0 ) {
9049 close( mixerfd );
9050 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9051 error( RtAudioError::INVALID_USE );
9052 return info;
9053 }
9054
9055 if ( device >= nDevices ) {
9056 close( mixerfd );
9057 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9058 error( RtAudioError::INVALID_USE );
9059 return info;
9060 }
9061
9062 oss_audioinfo ainfo;
9063 ainfo.dev = device;
9064 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9065 close( mixerfd );
9066 if ( result == -1 ) {
9067 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9068 errorText_ = errorStream_.str();
9069 error( RtAudioError::WARNING );
9070 return info;
9071 }
9072
9073 // Probe channels
9074 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9075 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9076 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9077 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9078 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9079 }
9080
9081 // Probe data formats ... do for input
9082 unsigned long mask = ainfo.iformats;
9083 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9084 info.nativeFormats |= RTAUDIO_SINT16;
9085 if ( mask & AFMT_S8 )
9086 info.nativeFormats |= RTAUDIO_SINT8;
9087 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9088 info.nativeFormats |= RTAUDIO_SINT32;
9089 #ifdef AFMT_FLOAT
9090 if ( mask & AFMT_FLOAT )
9091 info.nativeFormats |= RTAUDIO_FLOAT32;
9092 #endif
9093 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9094 info.nativeFormats |= RTAUDIO_SINT24;
9095
9096 // Check that we have at least one supported format
9097 if ( info.nativeFormats == 0 ) {
9098 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9099 errorText_ = errorStream_.str();
9100 error( RtAudioError::WARNING );
9101 return info;
9102 }
9103
9104 // Probe the supported sample rates.
9105 info.sampleRates.clear();
9106 if ( ainfo.nrates ) {
9107 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9108 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9109 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9110 info.sampleRates.push_back( SAMPLE_RATES[k] );
9111
9112 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9113 info.preferredSampleRate = SAMPLE_RATES[k];
9114
9115 break;
9116 }
9117 }
9118 }
9119 }
9120 else {
9121 // Check min and max rate values;
9122 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9123 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9124 info.sampleRates.push_back( SAMPLE_RATES[k] );
9125
9126 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9127 info.preferredSampleRate = SAMPLE_RATES[k];
9128 }
9129 }
9130 }
9131
9132 if ( info.sampleRates.size() == 0 ) {
9133 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9134 errorText_ = errorStream_.str();
9135 error( RtAudioError::WARNING );
9136 }
9137 else {
9138 info.probed = true;
9139 info.name = ainfo.name;
9140 }
9141
9142 return info;
9143 }
9144
9145
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)9146 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9147 unsigned int firstChannel, unsigned int sampleRate,
9148 RtAudioFormat format, unsigned int *bufferSize,
9149 RtAudio::StreamOptions *options )
9150 {
9151 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9152 if ( mixerfd == -1 ) {
9153 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9154 return FAILURE;
9155 }
9156
9157 oss_sysinfo sysinfo;
9158 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9159 if ( result == -1 ) {
9160 close( mixerfd );
9161 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9162 return FAILURE;
9163 }
9164
9165 unsigned nDevices = sysinfo.numaudios;
9166 if ( nDevices == 0 ) {
9167 // This should not happen because a check is made before this function is called.
9168 close( mixerfd );
9169 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9170 return FAILURE;
9171 }
9172
9173 if ( device >= nDevices ) {
9174 // This should not happen because a check is made before this function is called.
9175 close( mixerfd );
9176 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9177 return FAILURE;
9178 }
9179
9180 oss_audioinfo ainfo;
9181 ainfo.dev = device;
9182 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9183 close( mixerfd );
9184 if ( result == -1 ) {
9185 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9186 errorText_ = errorStream_.str();
9187 return FAILURE;
9188 }
9189
9190 // Check if device supports input or output
9191 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9192 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9193 if ( mode == OUTPUT )
9194 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9195 else
9196 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9197 errorText_ = errorStream_.str();
9198 return FAILURE;
9199 }
9200
9201 int flags = 0;
9202 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9203 if ( mode == OUTPUT )
9204 flags |= O_WRONLY;
9205 else { // mode == INPUT
9206 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9207 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9208 close( handle->id[0] );
9209 handle->id[0] = 0;
9210 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9211 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9212 errorText_ = errorStream_.str();
9213 return FAILURE;
9214 }
9215 // Check that the number previously set channels is the same.
9216 if ( stream_.nUserChannels[0] != channels ) {
9217 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9218 errorText_ = errorStream_.str();
9219 return FAILURE;
9220 }
9221 flags |= O_RDWR;
9222 }
9223 else
9224 flags |= O_RDONLY;
9225 }
9226
9227 // Set exclusive access if specified.
9228 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9229
9230 // Try to open the device.
9231 int fd;
9232 fd = open( ainfo.devnode, flags, 0 );
9233 if ( fd == -1 ) {
9234 if ( errno == EBUSY )
9235 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9236 else
9237 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9238 errorText_ = errorStream_.str();
9239 return FAILURE;
9240 }
9241
9242 // For duplex operation, specifically set this mode (this doesn't seem to work).
9243 /*
9244 if ( flags | O_RDWR ) {
9245 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9246 if ( result == -1) {
9247 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9248 errorText_ = errorStream_.str();
9249 return FAILURE;
9250 }
9251 }
9252 */
9253
9254 // Check the device channel support.
9255 stream_.nUserChannels[mode] = channels;
9256 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9257 close( fd );
9258 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9259 errorText_ = errorStream_.str();
9260 return FAILURE;
9261 }
9262
9263 // Set the number of channels.
9264 int deviceChannels = channels + firstChannel;
9265 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9266 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9267 close( fd );
9268 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9269 errorText_ = errorStream_.str();
9270 return FAILURE;
9271 }
9272 stream_.nDeviceChannels[mode] = deviceChannels;
9273
9274 // Get the data format mask
9275 int mask;
9276 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9277 if ( result == -1 ) {
9278 close( fd );
9279 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9280 errorText_ = errorStream_.str();
9281 return FAILURE;
9282 }
9283
9284 // Determine how to set the device format.
9285 stream_.userFormat = format;
9286 int deviceFormat = -1;
9287 stream_.doByteSwap[mode] = false;
9288 if ( format == RTAUDIO_SINT8 ) {
9289 if ( mask & AFMT_S8 ) {
9290 deviceFormat = AFMT_S8;
9291 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9292 }
9293 }
9294 else if ( format == RTAUDIO_SINT16 ) {
9295 if ( mask & AFMT_S16_NE ) {
9296 deviceFormat = AFMT_S16_NE;
9297 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9298 }
9299 else if ( mask & AFMT_S16_OE ) {
9300 deviceFormat = AFMT_S16_OE;
9301 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9302 stream_.doByteSwap[mode] = true;
9303 }
9304 }
9305 else if ( format == RTAUDIO_SINT24 ) {
9306 if ( mask & AFMT_S24_NE ) {
9307 deviceFormat = AFMT_S24_NE;
9308 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9309 }
9310 else if ( mask & AFMT_S24_OE ) {
9311 deviceFormat = AFMT_S24_OE;
9312 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9313 stream_.doByteSwap[mode] = true;
9314 }
9315 }
9316 else if ( format == RTAUDIO_SINT32 ) {
9317 if ( mask & AFMT_S32_NE ) {
9318 deviceFormat = AFMT_S32_NE;
9319 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9320 }
9321 else if ( mask & AFMT_S32_OE ) {
9322 deviceFormat = AFMT_S32_OE;
9323 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9324 stream_.doByteSwap[mode] = true;
9325 }
9326 }
9327
9328 if ( deviceFormat == -1 ) {
9329 // The user requested format is not natively supported by the device.
9330 if ( mask & AFMT_S16_NE ) {
9331 deviceFormat = AFMT_S16_NE;
9332 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9333 }
9334 else if ( mask & AFMT_S32_NE ) {
9335 deviceFormat = AFMT_S32_NE;
9336 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9337 }
9338 else if ( mask & AFMT_S24_NE ) {
9339 deviceFormat = AFMT_S24_NE;
9340 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9341 }
9342 else if ( mask & AFMT_S16_OE ) {
9343 deviceFormat = AFMT_S16_OE;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9345 stream_.doByteSwap[mode] = true;
9346 }
9347 else if ( mask & AFMT_S32_OE ) {
9348 deviceFormat = AFMT_S32_OE;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9350 stream_.doByteSwap[mode] = true;
9351 }
9352 else if ( mask & AFMT_S24_OE ) {
9353 deviceFormat = AFMT_S24_OE;
9354 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9355 stream_.doByteSwap[mode] = true;
9356 }
9357 else if ( mask & AFMT_S8) {
9358 deviceFormat = AFMT_S8;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9360 }
9361 }
9362
9363 if ( stream_.deviceFormat[mode] == 0 ) {
9364 // This really shouldn't happen ...
9365 close( fd );
9366 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9367 errorText_ = errorStream_.str();
9368 return FAILURE;
9369 }
9370
9371 // Set the data format.
9372 int temp = deviceFormat;
9373 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9374 if ( result == -1 || deviceFormat != temp ) {
9375 close( fd );
9376 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9377 errorText_ = errorStream_.str();
9378 return FAILURE;
9379 }
9380
9381 // Attempt to set the buffer size. According to OSS, the minimum
9382 // number of buffers is two. The supposed minimum buffer size is 16
9383 // bytes, so that will be our lower bound. The argument to this
9384 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9385 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9386 // We'll check the actual value used near the end of the setup
9387 // procedure.
9388 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9389 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9390 int buffers = 0;
9391 if ( options ) buffers = options->numberOfBuffers;
9392 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9393 if ( buffers < 2 ) buffers = 3;
9394 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9395 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9396 if ( result == -1 ) {
9397 close( fd );
9398 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9399 errorText_ = errorStream_.str();
9400 return FAILURE;
9401 }
9402 stream_.nBuffers = buffers;
9403
9404 // Save buffer size (in sample frames).
9405 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9406 stream_.bufferSize = *bufferSize;
9407
9408 // Set the sample rate.
9409 int srate = sampleRate;
9410 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9411 if ( result == -1 ) {
9412 close( fd );
9413 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9414 errorText_ = errorStream_.str();
9415 return FAILURE;
9416 }
9417
9418 // Verify the sample rate setup worked.
9419 if ( abs( srate - (int)sampleRate ) > 100 ) {
9420 close( fd );
9421 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9422 errorText_ = errorStream_.str();
9423 return FAILURE;
9424 }
9425 stream_.sampleRate = sampleRate;
9426
9427 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9428 // We're doing duplex setup here.
9429 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9430 stream_.nDeviceChannels[0] = deviceChannels;
9431 }
9432
9433 // Set interleaving parameters.
9434 stream_.userInterleaved = true;
9435 stream_.deviceInterleaved[mode] = true;
9436 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9437 stream_.userInterleaved = false;
9438
9439 // Set flags for buffer conversion
9440 stream_.doConvertBuffer[mode] = false;
9441 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9442 stream_.doConvertBuffer[mode] = true;
9443 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9444 stream_.doConvertBuffer[mode] = true;
9445 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9446 stream_.nUserChannels[mode] > 1 )
9447 stream_.doConvertBuffer[mode] = true;
9448
9449 // Allocate the stream handles if necessary and then save.
9450 if ( stream_.apiHandle == 0 ) {
9451 try {
9452 handle = new OssHandle;
9453 }
9454 catch ( std::bad_alloc& ) {
9455 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9456 goto error;
9457 }
9458
9459 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9460 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9461 goto error;
9462 }
9463
9464 stream_.apiHandle = (void *) handle;
9465 }
9466 else {
9467 handle = (OssHandle *) stream_.apiHandle;
9468 }
9469 handle->id[mode] = fd;
9470
9471 // Allocate necessary internal buffers.
9472 unsigned long bufferBytes;
9473 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9474 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9475 if ( stream_.userBuffer[mode] == NULL ) {
9476 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9477 goto error;
9478 }
9479
9480 if ( stream_.doConvertBuffer[mode] ) {
9481
9482 bool makeBuffer = true;
9483 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9484 if ( mode == INPUT ) {
9485 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9486 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9487 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9488 }
9489 }
9490
9491 if ( makeBuffer ) {
9492 bufferBytes *= *bufferSize;
9493 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9494 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9495 if ( stream_.deviceBuffer == NULL ) {
9496 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9497 goto error;
9498 }
9499 }
9500 }
9501
9502 stream_.device[mode] = device;
9503 stream_.state = STREAM_STOPPED;
9504
9505 // Setup the buffer conversion information structure.
9506 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9507
9508 // Setup thread if necessary.
9509 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9510 // We had already set up an output stream.
9511 stream_.mode = DUPLEX;
9512 if ( stream_.device[0] == device ) handle->id[0] = fd;
9513 }
9514 else {
9515 stream_.mode = mode;
9516
9517 // Setup callback thread.
9518 stream_.callbackInfo.object = (void *) this;
9519
9520 // Set the thread attributes for joinable and realtime scheduling
9521 // priority. The higher priority will only take affect if the
9522 // program is run as root or suid.
9523 pthread_attr_t attr;
9524 pthread_attr_init( &attr );
9525 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9526 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9527 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9528 stream_.callbackInfo.doRealtime = true;
9529 struct sched_param param;
9530 int priority = options->priority;
9531 int min = sched_get_priority_min( SCHED_RR );
9532 int max = sched_get_priority_max( SCHED_RR );
9533 if ( priority < min ) priority = min;
9534 else if ( priority > max ) priority = max;
9535 param.sched_priority = priority;
9536
9537 // Set the policy BEFORE the priority. Otherwise it fails.
9538 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9539 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9540 // This is definitely required. Otherwise it fails.
9541 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9542 pthread_attr_setschedparam(&attr, ¶m);
9543 }
9544 else
9545 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9546 #else
9547 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9548 #endif
9549
9550 stream_.callbackInfo.isRunning = true;
9551 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9552 pthread_attr_destroy( &attr );
9553 if ( result ) {
9554 // Failed. Try instead with default attributes.
9555 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9556 if ( result ) {
9557 stream_.callbackInfo.isRunning = false;
9558 errorText_ = "RtApiOss::error creating callback thread!";
9559 goto error;
9560 }
9561 }
9562 }
9563
9564 return SUCCESS;
9565
9566 error:
9567 if ( handle ) {
9568 pthread_cond_destroy( &handle->runnable );
9569 if ( handle->id[0] ) close( handle->id[0] );
9570 if ( handle->id[1] ) close( handle->id[1] );
9571 delete handle;
9572 stream_.apiHandle = 0;
9573 }
9574
9575 for ( int i=0; i<2; i++ ) {
9576 if ( stream_.userBuffer[i] ) {
9577 free( stream_.userBuffer[i] );
9578 stream_.userBuffer[i] = 0;
9579 }
9580 }
9581
9582 if ( stream_.deviceBuffer ) {
9583 free( stream_.deviceBuffer );
9584 stream_.deviceBuffer = 0;
9585 }
9586
9587 stream_.state = STREAM_CLOSED;
9588 return FAILURE;
9589 }
9590
closeStream()9591 void RtApiOss :: closeStream()
9592 {
9593 if ( stream_.state == STREAM_CLOSED ) {
9594 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9595 error( RtAudioError::WARNING );
9596 return;
9597 }
9598
9599 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9600 stream_.callbackInfo.isRunning = false;
9601 MUTEX_LOCK( &stream_.mutex );
9602 if ( stream_.state == STREAM_STOPPED )
9603 pthread_cond_signal( &handle->runnable );
9604 MUTEX_UNLOCK( &stream_.mutex );
9605 pthread_join( stream_.callbackInfo.thread, NULL );
9606
9607 if ( stream_.state == STREAM_RUNNING ) {
9608 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9609 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9610 else
9611 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9612 stream_.state = STREAM_STOPPED;
9613 }
9614
9615 if ( handle ) {
9616 pthread_cond_destroy( &handle->runnable );
9617 if ( handle->id[0] ) close( handle->id[0] );
9618 if ( handle->id[1] ) close( handle->id[1] );
9619 delete handle;
9620 stream_.apiHandle = 0;
9621 }
9622
9623 for ( int i=0; i<2; i++ ) {
9624 if ( stream_.userBuffer[i] ) {
9625 free( stream_.userBuffer[i] );
9626 stream_.userBuffer[i] = 0;
9627 }
9628 }
9629
9630 if ( stream_.deviceBuffer ) {
9631 free( stream_.deviceBuffer );
9632 stream_.deviceBuffer = 0;
9633 }
9634
9635 stream_.mode = UNINITIALIZED;
9636 stream_.state = STREAM_CLOSED;
9637 }
9638
void RtApiOss :: startStream()
{
  // Mark the stream running and wake the blocked callback thread.  No
  // device ioctl is needed: OSS starts the device as soon as samples
  // are written to (or read from) it.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  // Release the callback thread waiting on the condition variable in
  // callbackEvent().  Signalled after unlocking to avoid waking the
  // waiter only to block on the mutex.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );
}
9664
void RtApiOss :: stopStream()
{
  // Graceful stop: drain the output queue by writing silence, then halt
  // the device(s).  Contrast with abortStream(), which halts immediately.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Use whichever buffer actually feeds the device: the converted
    // device buffer if format/channel conversion is active, else the
    // user buffer directly.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Write one buffer of silence per queued device buffer (plus one)
    // so previously written audio plays out before the halt below.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    // Duplex streams will need to re-prime the trigger on restart.
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex mode shares one descriptor
  // (in which case the halt above already stopped it).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  // Raise the system error only after releasing the mutex.
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9736
void RtApiOss :: abortStream()
{
  // Immediate stop: halt the device(s) without draining queued output.
  // Contrast with stopStream(), which flushes silence first.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    // Duplex streams will need to re-prime the trigger on restart.
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex mode shares one descriptor
  // (in which case the halt above already stopped it).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  // Raise the system error only after releasing the mutex.
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9782
void RtApiOss :: callbackEvent()
{
  // One iteration of the audio processing loop, driven repeatedly by
  // ossCallbackHandler(): wait while stopped, invoke the user callback,
  // then write output to and/or read input from the OSS descriptor(s).
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream() or closeStream() signals the condition
    // variable.  Re-check the state afterward: the wakeup may mean
    // shutdown rather than (re)start.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any under/overflow flags recorded by earlier
  // iterations so the callback sees each xrun exactly once.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return value 2 requests an immediate abort (no draining).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: prime the output while triggering is
      // disabled, then enable input and output together so both
      // directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // Callback return value 1 requests a normal (drained) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
9914
ossCallbackHandler(void * ptr)9915 static void *ossCallbackHandler( void *ptr )
9916 {
9917 CallbackInfo *info = (CallbackInfo *) ptr;
9918 RtApiOss *object = (RtApiOss *) info->object;
9919 bool *isRunning = &info->isRunning;
9920
9921 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9922 if (info->doRealtime) {
9923 std::cerr << "RtAudio oss: " <<
9924 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9925 "running realtime scheduling" << std::endl;
9926 }
9927 #endif
9928
9929 while ( *isRunning == true ) {
9930 pthread_testcancel();
9931 object->callbackEvent();
9932 }
9933
9934 pthread_exit( NULL );
9935 }
9936
9937 //******************** End of __LINUX_OSS__ *********************//
9938 #endif
9939
9940
9941 // *************************************************** //
9942 //
9943 // Protected common (OS-independent) RtAudio methods.
9944 //
9945 // *************************************************** //
9946
9947 // This method can be modified to control the behavior of error
9948 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatch: forward to the registered error callback if
  // one exists; otherwise print warnings to stderr (when enabled) and
  // throw an RtAudioError for anything more severe than a warning.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Reentrancy guard: if we're already inside an error() call (e.g.
    // via abortStream() below), drop the nested message.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy errorText_ now; the abortStream() call below may overwrite it.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9978
verifyStream()9979 void RtApi :: verifyStream()
9980 {
9981 if ( stream_.state == STREAM_CLOSED ) {
9982 errorText_ = "RtApi:: a stream is not open!";
9983 error( RtAudioError::INVALID_USE );
9984 }
9985 }
9986
clearStreamInfo()9987 void RtApi :: clearStreamInfo()
9988 {
9989 stream_.mode = UNINITIALIZED;
9990 stream_.state = STREAM_CLOSED;
9991 stream_.sampleRate = 0;
9992 stream_.bufferSize = 0;
9993 stream_.nBuffers = 0;
9994 stream_.userFormat = 0;
9995 stream_.userInterleaved = true;
9996 stream_.streamTime = 0.0;
9997 stream_.apiHandle = 0;
9998 stream_.deviceBuffer = 0;
9999 stream_.callbackInfo.callback = 0;
10000 stream_.callbackInfo.userData = 0;
10001 stream_.callbackInfo.isRunning = false;
10002 stream_.callbackInfo.errorCallback = 0;
10003 for ( int i=0; i<2; i++ ) {
10004 stream_.device[i] = 11111;
10005 stream_.doConvertBuffer[i] = false;
10006 stream_.deviceInterleaved[i] = true;
10007 stream_.doByteSwap[i] = false;
10008 stream_.nUserChannels[i] = 0;
10009 stream_.nDeviceChannels[i] = 0;
10010 stream_.channelOffset[i] = 0;
10011 stream_.deviceFormat[i] = 0;
10012 stream_.latency[i] = 0;
10013 stream_.userBuffer[i] = 0;
10014 stream_.convertInfo[i].channels = 0;
10015 stream_.convertInfo[i].inJump = 0;
10016 stream_.convertInfo[i].outJump = 0;
10017 stream_.convertInfo[i].inFormat = 0;
10018 stream_.convertInfo[i].outFormat = 0;
10019 stream_.convertInfo[i].inOffset.clear();
10020 stream_.convertInfo[i].outOffset.clear();
10021 }
10022 }
10023
formatBytes(RtAudioFormat format)10024 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10025 {
10026 if ( format == RTAUDIO_SINT16 )
10027 return 2;
10028 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10029 return 4;
10030 else if ( format == RTAUDIO_FLOAT64 )
10031 return 8;
10032 else if ( format == RTAUDIO_SINT24 )
10033 return 3;
10034 else if ( format == RTAUDIO_SINT8 )
10035 return 1;
10036
10037 errorText_ = "RtApi::formatBytes: undefined format.";
10038 error( RtAudioError::WARNING );
10039
10040 return 0;
10041 }
10042
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Populate stream_.convertInfo[mode] for convertBuffer(): source and
  // destination formats, per-frame jumps, and per-channel sample offsets
  // handling interleaved <-> non-interleaved layouts plus a channel offset.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have (the minimum).
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    // Interleaving differs between the two sides.  For the interleaved
    // side offsets step by 1 per channel; for the non-interleaved side
    // each channel is a contiguous plane of bufferSize samples, and the
    // corresponding jump becomes 1 (advance one sample per frame).
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides non-interleaved: planar offsets on each side, one
      // sample advanced per frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // Shift the device-side offsets so the converted channels start at
  // firstChannel within the device's channel set: by channel index when
  // interleaved, by whole planes (bufferSize samples) when not.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
10122
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)10123 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10124 {
10125 // This function does format conversion, input/output channel compensation, and
10126 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10127 // the lower three bytes of a 32-bit integer.
10128
10129 // Clear our device buffer when in/out duplex device channels are different
10130 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10131 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10132 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10133
10134 int j;
10135 if (info.outFormat == RTAUDIO_FLOAT64) {
10136 Float64 scale;
10137 Float64 *out = (Float64 *)outBuffer;
10138
10139 if (info.inFormat == RTAUDIO_SINT8) {
10140 signed char *in = (signed char *)inBuffer;
10141 scale = 1.0 / 127.5;
10142 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10143 for (j=0; j<info.channels; j++) {
10144 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10145 out[info.outOffset[j]] += 0.5;
10146 out[info.outOffset[j]] *= scale;
10147 }
10148 in += info.inJump;
10149 out += info.outJump;
10150 }
10151 }
10152 else if (info.inFormat == RTAUDIO_SINT16) {
10153 Int16 *in = (Int16 *)inBuffer;
10154 scale = 1.0 / 32767.5;
10155 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10156 for (j=0; j<info.channels; j++) {
10157 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10158 out[info.outOffset[j]] += 0.5;
10159 out[info.outOffset[j]] *= scale;
10160 }
10161 in += info.inJump;
10162 out += info.outJump;
10163 }
10164 }
10165 else if (info.inFormat == RTAUDIO_SINT24) {
10166 Int24 *in = (Int24 *)inBuffer;
10167 scale = 1.0 / 8388607.5;
10168 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10169 for (j=0; j<info.channels; j++) {
10170 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10171 out[info.outOffset[j]] += 0.5;
10172 out[info.outOffset[j]] *= scale;
10173 }
10174 in += info.inJump;
10175 out += info.outJump;
10176 }
10177 }
10178 else if (info.inFormat == RTAUDIO_SINT32) {
10179 Int32 *in = (Int32 *)inBuffer;
10180 scale = 1.0 / 2147483647.5;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10184 out[info.outOffset[j]] += 0.5;
10185 out[info.outOffset[j]] *= scale;
10186 }
10187 in += info.inJump;
10188 out += info.outJump;
10189 }
10190 }
10191 else if (info.inFormat == RTAUDIO_FLOAT32) {
10192 Float32 *in = (Float32 *)inBuffer;
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10194 for (j=0; j<info.channels; j++) {
10195 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10196 }
10197 in += info.inJump;
10198 out += info.outJump;
10199 }
10200 }
10201 else if (info.inFormat == RTAUDIO_FLOAT64) {
10202 // Channel compensation and/or (de)interleaving only.
10203 Float64 *in = (Float64 *)inBuffer;
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10205 for (j=0; j<info.channels; j++) {
10206 out[info.outOffset[j]] = in[info.inOffset[j]];
10207 }
10208 in += info.inJump;
10209 out += info.outJump;
10210 }
10211 }
10212 }
10213 else if (info.outFormat == RTAUDIO_FLOAT32) {
10214 Float32 scale;
10215 Float32 *out = (Float32 *)outBuffer;
10216
10217 if (info.inFormat == RTAUDIO_SINT8) {
10218 signed char *in = (signed char *)inBuffer;
10219 scale = (Float32) ( 1.0 / 127.5 );
10220 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10221 for (j=0; j<info.channels; j++) {
10222 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10223 out[info.outOffset[j]] += 0.5;
10224 out[info.outOffset[j]] *= scale;
10225 }
10226 in += info.inJump;
10227 out += info.outJump;
10228 }
10229 }
10230 else if (info.inFormat == RTAUDIO_SINT16) {
10231 Int16 *in = (Int16 *)inBuffer;
10232 scale = (Float32) ( 1.0 / 32767.5 );
10233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10234 for (j=0; j<info.channels; j++) {
10235 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10236 out[info.outOffset[j]] += 0.5;
10237 out[info.outOffset[j]] *= scale;
10238 }
10239 in += info.inJump;
10240 out += info.outJump;
10241 }
10242 }
10243 else if (info.inFormat == RTAUDIO_SINT24) {
10244 Int24 *in = (Int24 *)inBuffer;
10245 scale = (Float32) ( 1.0 / 8388607.5 );
10246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10247 for (j=0; j<info.channels; j++) {
10248 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10249 out[info.outOffset[j]] += 0.5;
10250 out[info.outOffset[j]] *= scale;
10251 }
10252 in += info.inJump;
10253 out += info.outJump;
10254 }
10255 }
10256 else if (info.inFormat == RTAUDIO_SINT32) {
10257 Int32 *in = (Int32 *)inBuffer;
10258 scale = (Float32) ( 1.0 / 2147483647.5 );
10259 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10260 for (j=0; j<info.channels; j++) {
10261 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10262 out[info.outOffset[j]] += 0.5;
10263 out[info.outOffset[j]] *= scale;
10264 }
10265 in += info.inJump;
10266 out += info.outJump;
10267 }
10268 }
10269 else if (info.inFormat == RTAUDIO_FLOAT32) {
10270 // Channel compensation and/or (de)interleaving only.
10271 Float32 *in = (Float32 *)inBuffer;
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10273 for (j=0; j<info.channels; j++) {
10274 out[info.outOffset[j]] = in[info.inOffset[j]];
10275 }
10276 in += info.inJump;
10277 out += info.outJump;
10278 }
10279 }
10280 else if (info.inFormat == RTAUDIO_FLOAT64) {
10281 Float64 *in = (Float64 *)inBuffer;
10282 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10283 for (j=0; j<info.channels; j++) {
10284 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10285 }
10286 in += info.inJump;
10287 out += info.outJump;
10288 }
10289 }
10290 }
10291 else if (info.outFormat == RTAUDIO_SINT32) {
10292 Int32 *out = (Int32 *)outBuffer;
10293 if (info.inFormat == RTAUDIO_SINT8) {
10294 signed char *in = (signed char *)inBuffer;
10295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10296 for (j=0; j<info.channels; j++) {
10297 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10298 out[info.outOffset[j]] <<= 24;
10299 }
10300 in += info.inJump;
10301 out += info.outJump;
10302 }
10303 }
10304 else if (info.inFormat == RTAUDIO_SINT16) {
10305 Int16 *in = (Int16 *)inBuffer;
10306 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10307 for (j=0; j<info.channels; j++) {
10308 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10309 out[info.outOffset[j]] <<= 16;
10310 }
10311 in += info.inJump;
10312 out += info.outJump;
10313 }
10314 }
10315 else if (info.inFormat == RTAUDIO_SINT24) {
10316 Int24 *in = (Int24 *)inBuffer;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10320 out[info.outOffset[j]] <<= 8;
10321 }
10322 in += info.inJump;
10323 out += info.outJump;
10324 }
10325 }
10326 else if (info.inFormat == RTAUDIO_SINT32) {
10327 // Channel compensation and/or (de)interleaving only.
10328 Int32 *in = (Int32 *)inBuffer;
10329 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10330 for (j=0; j<info.channels; j++) {
10331 out[info.outOffset[j]] = in[info.inOffset[j]];
10332 }
10333 in += info.inJump;
10334 out += info.outJump;
10335 }
10336 }
10337 else if (info.inFormat == RTAUDIO_FLOAT32) {
10338 Float32 *in = (Float32 *)inBuffer;
10339 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10340 for (j=0; j<info.channels; j++) {
10341 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10342 }
10343 in += info.inJump;
10344 out += info.outJump;
10345 }
10346 }
10347 else if (info.inFormat == RTAUDIO_FLOAT64) {
10348 Float64 *in = (Float64 *)inBuffer;
10349 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10350 for (j=0; j<info.channels; j++) {
10351 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10352 }
10353 in += info.inJump;
10354 out += info.outJump;
10355 }
10356 }
10357 }
10358 else if (info.outFormat == RTAUDIO_SINT24) {
10359 Int24 *out = (Int24 *)outBuffer;
10360 if (info.inFormat == RTAUDIO_SINT8) {
10361 signed char *in = (signed char *)inBuffer;
10362 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10363 for (j=0; j<info.channels; j++) {
10364 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10365 //out[info.outOffset[j]] <<= 16;
10366 }
10367 in += info.inJump;
10368 out += info.outJump;
10369 }
10370 }
10371 else if (info.inFormat == RTAUDIO_SINT16) {
10372 Int16 *in = (Int16 *)inBuffer;
10373 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10374 for (j=0; j<info.channels; j++) {
10375 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10376 //out[info.outOffset[j]] <<= 8;
10377 }
10378 in += info.inJump;
10379 out += info.outJump;
10380 }
10381 }
10382 else if (info.inFormat == RTAUDIO_SINT24) {
10383 // Channel compensation and/or (de)interleaving only.
10384 Int24 *in = (Int24 *)inBuffer;
10385 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10386 for (j=0; j<info.channels; j++) {
10387 out[info.outOffset[j]] = in[info.inOffset[j]];
10388 }
10389 in += info.inJump;
10390 out += info.outJump;
10391 }
10392 }
10393 else if (info.inFormat == RTAUDIO_SINT32) {
10394 Int32 *in = (Int32 *)inBuffer;
10395 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10396 for (j=0; j<info.channels; j++) {
10397 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10398 //out[info.outOffset[j]] >>= 8;
10399 }
10400 in += info.inJump;
10401 out += info.outJump;
10402 }
10403 }
10404 else if (info.inFormat == RTAUDIO_FLOAT32) {
10405 Float32 *in = (Float32 *)inBuffer;
10406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10407 for (j=0; j<info.channels; j++) {
10408 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10409 }
10410 in += info.inJump;
10411 out += info.outJump;
10412 }
10413 }
10414 else if (info.inFormat == RTAUDIO_FLOAT64) {
10415 Float64 *in = (Float64 *)inBuffer;
10416 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10417 for (j=0; j<info.channels; j++) {
10418 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10419 }
10420 in += info.inJump;
10421 out += info.outJump;
10422 }
10423 }
10424 }
10425 else if (info.outFormat == RTAUDIO_SINT16) {
10426 Int16 *out = (Int16 *)outBuffer;
10427 if (info.inFormat == RTAUDIO_SINT8) {
10428 signed char *in = (signed char *)inBuffer;
10429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10430 for (j=0; j<info.channels; j++) {
10431 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10432 out[info.outOffset[j]] <<= 8;
10433 }
10434 in += info.inJump;
10435 out += info.outJump;
10436 }
10437 }
10438 else if (info.inFormat == RTAUDIO_SINT16) {
10439 // Channel compensation and/or (de)interleaving only.
10440 Int16 *in = (Int16 *)inBuffer;
10441 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10442 for (j=0; j<info.channels; j++) {
10443 out[info.outOffset[j]] = in[info.inOffset[j]];
10444 }
10445 in += info.inJump;
10446 out += info.outJump;
10447 }
10448 }
10449 else if (info.inFormat == RTAUDIO_SINT24) {
10450 Int24 *in = (Int24 *)inBuffer;
10451 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10452 for (j=0; j<info.channels; j++) {
10453 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10454 }
10455 in += info.inJump;
10456 out += info.outJump;
10457 }
10458 }
10459 else if (info.inFormat == RTAUDIO_SINT32) {
10460 Int32 *in = (Int32 *)inBuffer;
10461 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10462 for (j=0; j<info.channels; j++) {
10463 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10464 }
10465 in += info.inJump;
10466 out += info.outJump;
10467 }
10468 }
10469 else if (info.inFormat == RTAUDIO_FLOAT32) {
10470 Float32 *in = (Float32 *)inBuffer;
10471 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10472 for (j=0; j<info.channels; j++) {
10473 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10474 }
10475 in += info.inJump;
10476 out += info.outJump;
10477 }
10478 }
10479 else if (info.inFormat == RTAUDIO_FLOAT64) {
10480 Float64 *in = (Float64 *)inBuffer;
10481 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10482 for (j=0; j<info.channels; j++) {
10483 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10484 }
10485 in += info.inJump;
10486 out += info.outJump;
10487 }
10488 }
10489 }
10490 else if (info.outFormat == RTAUDIO_SINT8) {
10491 signed char *out = (signed char *)outBuffer;
10492 if (info.inFormat == RTAUDIO_SINT8) {
10493 // Channel compensation and/or (de)interleaving only.
10494 signed char *in = (signed char *)inBuffer;
10495 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10496 for (j=0; j<info.channels; j++) {
10497 out[info.outOffset[j]] = in[info.inOffset[j]];
10498 }
10499 in += info.inJump;
10500 out += info.outJump;
10501 }
10502 }
10503 if (info.inFormat == RTAUDIO_SINT16) {
10504 Int16 *in = (Int16 *)inBuffer;
10505 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10506 for (j=0; j<info.channels; j++) {
10507 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10508 }
10509 in += info.inJump;
10510 out += info.outJump;
10511 }
10512 }
10513 else if (info.inFormat == RTAUDIO_SINT24) {
10514 Int24 *in = (Int24 *)inBuffer;
10515 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10516 for (j=0; j<info.channels; j++) {
10517 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10518 }
10519 in += info.inJump;
10520 out += info.outJump;
10521 }
10522 }
10523 else if (info.inFormat == RTAUDIO_SINT32) {
10524 Int32 *in = (Int32 *)inBuffer;
10525 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10526 for (j=0; j<info.channels; j++) {
10527 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10528 }
10529 in += info.inJump;
10530 out += info.outJump;
10531 }
10532 }
10533 else if (info.inFormat == RTAUDIO_FLOAT32) {
10534 Float32 *in = (Float32 *)inBuffer;
10535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10536 for (j=0; j<info.channels; j++) {
10537 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10538 }
10539 in += info.inJump;
10540 out += info.outJump;
10541 }
10542 }
10543 else if (info.inFormat == RTAUDIO_FLOAT64) {
10544 Float64 *in = (Float64 *)inBuffer;
10545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10546 for (j=0; j<info.channels; j++) {
10547 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10548 }
10549 in += info.inJump;
10550 out += info.outJump;
10551 }
10552 }
10553 }
10554 }
10555
10556 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10557 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10558 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10559
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10560 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10561 {
10562 char val;
10563 char *ptr;
10564
10565 ptr = buffer;
10566 if ( format == RTAUDIO_SINT16 ) {
10567 for ( unsigned int i=0; i<samples; i++ ) {
10568 // Swap 1st and 2nd bytes.
10569 val = *(ptr);
10570 *(ptr) = *(ptr+1);
10571 *(ptr+1) = val;
10572
10573 // Increment 2 bytes.
10574 ptr += 2;
10575 }
10576 }
10577 else if ( format == RTAUDIO_SINT32 ||
10578 format == RTAUDIO_FLOAT32 ) {
10579 for ( unsigned int i=0; i<samples; i++ ) {
10580 // Swap 1st and 4th bytes.
10581 val = *(ptr);
10582 *(ptr) = *(ptr+3);
10583 *(ptr+3) = val;
10584
10585 // Swap 2nd and 3rd bytes.
10586 ptr += 1;
10587 val = *(ptr);
10588 *(ptr) = *(ptr+1);
10589 *(ptr+1) = val;
10590
10591 // Increment 3 more bytes.
10592 ptr += 3;
10593 }
10594 }
10595 else if ( format == RTAUDIO_SINT24 ) {
10596 for ( unsigned int i=0; i<samples; i++ ) {
10597 // Swap 1st and 3rd bytes.
10598 val = *(ptr);
10599 *(ptr) = *(ptr+2);
10600 *(ptr+2) = val;
10601
10602 // Increment 2 more bytes.
10603 ptr += 2;
10604 }
10605 }
10606 else if ( format == RTAUDIO_FLOAT64 ) {
10607 for ( unsigned int i=0; i<samples; i++ ) {
10608 // Swap 1st and 8th bytes
10609 val = *(ptr);
10610 *(ptr) = *(ptr+7);
10611 *(ptr+7) = val;
10612
10613 // Swap 2nd and 7th bytes
10614 ptr += 1;
10615 val = *(ptr);
10616 *(ptr) = *(ptr+5);
10617 *(ptr+5) = val;
10618
10619 // Swap 3rd and 6th bytes
10620 ptr += 1;
10621 val = *(ptr);
10622 *(ptr) = *(ptr+3);
10623 *(ptr+3) = val;
10624
10625 // Swap 4th and 5th bytes
10626 ptr += 1;
10627 val = *(ptr);
10628 *(ptr) = *(ptr+1);
10629 *(ptr+1) = val;
10630
10631 // Increment 5 more bytes.
10632 ptr += 5;
10633 }
10634 }
10635 }
10636
10637 // Indentation settings for Vim and Emacs
10638 //
10639 // Local Variables:
10640 // c-basic-offset: 2
10641 // indent-tabs-mode: nil
10642 // End:
10643 //
10644 // vim: et sts=2 sw=2
10645
10646