1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
15
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
23
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
26
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
31
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 */
40 /************************************************************************/
41
42 // RtAudio: Version 5.1.0
43
44 #include "RtAudio.h"
45 #include <iostream>
46 #include <cstdlib>
47 #include <cstring>
48 #include <climits>
49 #include <cmath>
50 #include <algorithm>
51
// Static variable definitions.
// Canonical list of sample rates probed when a device reports a
// continuous (min..max) rate range rather than discrete rates; see
// the API-specific getDeviceInfo() implementations.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
58
// Platform mutex wrappers. RtApi guards stream state with a
// per-stream mutex: a CRITICAL_SECTION on Windows, a pthread_mutex_t
// on the pthread-based platforms.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

  #include "tchar.h"

  // Narrow-string overload: the text is already multi-byte; wrap it.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-string overload: convert UTF-16 to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call (NULL buffer) returns the required size in bytes,
    // including the terminating null; the std::string is sized one
    // byte shorter so it does not store that null.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
  // No real audio API compiled in: the mutex macros degenerate to a
  // harmless expression so the code still compiles.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
90
91 // *************************************************** //
92 //
93 // RtAudio definitions.
94 //
95 // *************************************************** //
96
getVersion(void)97 std::string RtAudio :: getVersion( void )
98 {
99 return RTAUDIO_VERSION;
100 }
101
// Define API names and display names.
// Must be in same order as API enum.
extern "C" {
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

// The order here will control the order of RtAudio's API search in
// the constructor.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
  RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
  RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
  RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
  RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
#endif
  RtAudio::UNSPECIFIED,
};
// Count of usable compiled APIs: the trailing UNSPECIFIED sentinel
// is excluded (hence the -1).
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
155
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// (Pre-C++11 static assertion: only the <true> specialization has a
// public constructor, so instantiating StaticAssert<false> fails to
// compile. StaticAssertions is never instantiated at runtime.)
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
163
getCompiledApi(std::vector<RtAudio::Api> & apis)164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 {
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
168 }
169
getApiName(RtAudio::Api api)170 std::string RtAudio :: getApiName( RtAudio::Api api )
171 {
172 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return "";
174 return rtaudio_api_names[api][0];
175 }
176
getApiDisplayName(RtAudio::Api api)177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 {
179 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return "Unknown";
181 return rtaudio_api_names[api][1];
182 }
183
getCompiledApiByName(const std::string & name)184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
185 {
186 unsigned int i=0;
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
191 }
192
// Instantiate the RtApi subclass corresponding to 'api', replacing
// any existing instance. On return, rtapi_ is non-zero only when
// support for the requested API was compiled into this build.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;  // stays null if no branch below matches

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
236
// Constructor. If 'api' names a compiled API, use it; otherwise fall
// back to probing each compiled API in priority order and keep the
// first one that reports at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  // Note: if no API had devices, rtapi_ still holds the last API
  // tried, so this return is normally taken.
  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
269
~RtAudio()270 RtAudio :: ~RtAudio()
271 {
272 if ( rtapi_ )
273 delete rtapi_;
274 }
275
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioErrorCallback errorCallback)276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
283 {
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
287 }
288
289 // *************************************************** //
290 //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
293 //
294 // *************************************************** //
295
// Base-class constructor: put the stream bookkeeping into a safe
// "no stream open" state and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  // Per-direction user buffers (indexing convention matches
  // stream_.latency[]: presumably [0]=output, [1]=input — see
  // getStreamLatency()).
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
307
RtApi :: ~RtApi()
{
  // Release the stream mutex created in the constructor.
  MUTEX_DESTROY( &stream_.mutex );
}
312
// Validate all caller-supplied stream parameters, then call the
// API-specific probeDeviceOpen() for the output and/or input sides.
// Errors are reported through error(); on any failure the stream
// remains closed and the function returns early.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  // A non-NULL parameter structure must request at least one channel.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // At least one direction (input or output) must be specified.
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for an unrecognized sample format.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Device indices must be within the range reported by the API.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, if requested.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Then the input side; if it fails, undo a successful output open.
  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Record the user callback information for the callback thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual number of buffers chosen by the API.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
405
getDefaultInputDevice(void)406 unsigned int RtApi :: getDefaultInputDevice( void )
407 {
408 // Should be implemented in subclasses if possible.
409 return 0;
410 }
411
getDefaultOutputDevice(void)412 unsigned int RtApi :: getDefaultOutputDevice( void )
413 {
414 // Should be implemented in subclasses if possible.
415 return 0;
416 }
417
closeStream(void)418 void RtApi :: closeStream( void )
419 {
420 // MUST be implemented in subclasses!
421 return;
422 }
423
probeDeviceOpen(unsigned int,StreamMode,unsigned int,unsigned int,unsigned int,RtAudioFormat,unsigned int *,RtAudio::StreamOptions *)424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
428 {
429 // MUST be implemented in subclasses!
430 return FAILURE;
431 }
432
tickStreamTime(void)433 void RtApi :: tickStreamTime( void )
434 {
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
438
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
443 #endif
444 }
445
getStreamLatency(void)446 long RtApi :: getStreamLatency( void )
447 {
448 verifyStream();
449
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
455
456 return totalLatency;
457 }
458
getStreamTime(void)459 double RtApi :: getStreamTime( void )
460 {
461 verifyStream();
462
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
466 struct timeval then;
467 struct timeval now;
468
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
471
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
477 #else
478 return stream_.streamTime;
479 #endif
480 }
481
setStreamTime(double time)482 void RtApi :: setStreamTime( double time )
483 {
484 verifyStream();
485
486 if ( time >= 0.0 )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
490 #endif
491 }
492
getStreamSampleRate(void)493 unsigned int RtApi :: getStreamSampleRate( void )
494 {
495 verifyStream();
496
497 return stream_.sampleRate;
498 }
499
500
501 // *************************************************** //
502 //
503 // OS/API-specific methods.
504 //
505 // *************************************************** //
506
#if defined(__MACOSX_CORE__)

// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices. A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived. The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway. However, we do provide a flag
// to the client callback function to inform of an over/underrun.

// A structure to hold various information related to the CoreAudio API
// implementation.
// The two-element arrays are indexed by stream direction (presumably
// [0]=output, [1]=input, matching stream_.latency[] — confirm against
// the implementation below).
struct CoreHandle {
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2]; // IOProc ids (10.5+ AudioDeviceCreateIOProcID API)
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // per-direction over/underrun flags
  char *deviceBuffer;
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Default-construct to a safe state: no buffer, no drain in
  // progress, one stream per direction, null device ids, no xruns.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
542
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly. (Setting a NULL run loop directs the HAL to use its
  // own thread for notifications — see Apple's AudioHardware.h notes
  // on kAudioHardwarePropertyRunLoop.)
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    // Non-fatal: warn and continue.
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
561
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
569
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The property size is the byte length of the AudioDeviceID array.
  return dataSize / sizeof( AudioDeviceID );
}
584
// Translate the HAL's default input AudioDeviceID into an RtAudio
// device index (its position in the system device list). Returns 0
// on any failure (device 0 is the trivial default).
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is trivially the answer.
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list and locate 'id' within it.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
617
// Translate the HAL's default output AudioDeviceID into an RtAudio
// device index (its position in the system device list). Returns 0
// on any failure (device 0 is the trivial default).
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is trivially the answer.
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list and locate 'id' within it.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
650
getDeviceInfo(unsigned int device)651 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
652 {
653 RtAudio::DeviceInfo info;
654 info.probed = false;
655
656 // Get device ID
657 unsigned int nDevices = getDeviceCount();
658 if ( nDevices == 0 ) {
659 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
660 error( RtAudioError::INVALID_USE );
661 return info;
662 }
663
664 if ( device >= nDevices ) {
665 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
666 error( RtAudioError::INVALID_USE );
667 return info;
668 }
669
670 AudioDeviceID deviceList[ nDevices ];
671 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
672 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
673 kAudioObjectPropertyScopeGlobal,
674 kAudioObjectPropertyElementMaster };
675 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
676 0, NULL, &dataSize, (void *) &deviceList );
677 if ( result != noErr ) {
678 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
679 error( RtAudioError::WARNING );
680 return info;
681 }
682
683 AudioDeviceID id = deviceList[ device ];
684
685 // Get the device name.
686 info.name.erase();
687 CFStringRef cfname;
688 dataSize = sizeof( CFStringRef );
689 property.mSelector = kAudioObjectPropertyManufacturer;
690 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
691 if ( result != noErr ) {
692 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
695 return info;
696 }
697
698 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
699 int length = CFStringGetLength(cfname);
700 char *mname = (char *)malloc(length * 3 + 1);
701 #if defined( UNICODE ) || defined( _UNICODE )
702 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
703 #else
704 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
705 #endif
706 info.name.append( (const char *)mname, strlen(mname) );
707 info.name.append( ": " );
708 CFRelease( cfname );
709 free(mname);
710
711 property.mSelector = kAudioObjectPropertyName;
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
713 if ( result != noErr ) {
714 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
715 errorText_ = errorStream_.str();
716 error( RtAudioError::WARNING );
717 return info;
718 }
719
720 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
721 length = CFStringGetLength(cfname);
722 char *name = (char *)malloc(length * 3 + 1);
723 #if defined( UNICODE ) || defined( _UNICODE )
724 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
725 #else
726 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
727 #endif
728 info.name.append( (const char *)name, strlen(name) );
729 CFRelease( cfname );
730 free(name);
731
732 // Get the output stream "configuration".
733 AudioBufferList *bufferList = nil;
734 property.mSelector = kAudioDevicePropertyStreamConfiguration;
735 property.mScope = kAudioDevicePropertyScopeOutput;
736 // property.mElement = kAudioObjectPropertyElementWildcard;
737 dataSize = 0;
738 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
739 if ( result != noErr || dataSize == 0 ) {
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
741 errorText_ = errorStream_.str();
742 error( RtAudioError::WARNING );
743 return info;
744 }
745
746 // Allocate the AudioBufferList.
747 bufferList = (AudioBufferList *) malloc( dataSize );
748 if ( bufferList == NULL ) {
749 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
750 error( RtAudioError::WARNING );
751 return info;
752 }
753
754 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
755 if ( result != noErr || dataSize == 0 ) {
756 free( bufferList );
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
760 return info;
761 }
762
763 // Get output channel information.
764 unsigned int i, nStreams = bufferList->mNumberBuffers;
765 for ( i=0; i<nStreams; i++ )
766 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
767 free( bufferList );
768
769 // Get the input stream "configuration".
770 property.mScope = kAudioDevicePropertyScopeInput;
771 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
772 if ( result != noErr || dataSize == 0 ) {
773 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
774 errorText_ = errorStream_.str();
775 error( RtAudioError::WARNING );
776 return info;
777 }
778
779 // Allocate the AudioBufferList.
780 bufferList = (AudioBufferList *) malloc( dataSize );
781 if ( bufferList == NULL ) {
782 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
783 error( RtAudioError::WARNING );
784 return info;
785 }
786
787 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
788 if (result != noErr || dataSize == 0) {
789 free( bufferList );
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
793 return info;
794 }
795
796 // Get input channel information.
797 nStreams = bufferList->mNumberBuffers;
798 for ( i=0; i<nStreams; i++ )
799 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
800 free( bufferList );
801
802 // If device opens for both playback and capture, we determine the channels.
803 if ( info.outputChannels > 0 && info.inputChannels > 0 )
804 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
805
806 // Probe the device sample rates.
807 bool isInput = false;
808 if ( info.outputChannels == 0 ) isInput = true;
809
810 // Determine the supported sample rates.
811 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
812 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
813 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
814 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
815 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
818 return info;
819 }
820
821 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
822 AudioValueRange rangeList[ nRanges ];
823 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
824 if ( result != kAudioHardwareNoError ) {
825 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
826 errorText_ = errorStream_.str();
827 error( RtAudioError::WARNING );
828 return info;
829 }
830
831 // The sample rate reporting mechanism is a bit of a mystery. It
832 // seems that it can either return individual rates or a range of
833 // rates. I assume that if the min / max range values are the same,
834 // then that represents a single supported rate and if the min / max
835 // range values are different, the device supports an arbitrary
836 // range of values (though there might be multiple ranges, so we'll
837 // use the most conservative range).
838 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
839 bool haveValueRange = false;
840 info.sampleRates.clear();
841 for ( UInt32 i=0; i<nRanges; i++ ) {
842 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
843 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
844 info.sampleRates.push_back( tmpSr );
845
846 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
847 info.preferredSampleRate = tmpSr;
848
849 } else {
850 haveValueRange = true;
851 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
852 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
853 }
854 }
855
856 if ( haveValueRange ) {
857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
858 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
859 info.sampleRates.push_back( SAMPLE_RATES[k] );
860
861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
862 info.preferredSampleRate = SAMPLE_RATES[k];
863 }
864 }
865 }
866
867 // Sort and remove any redundant values
868 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
869 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
870
871 if ( info.sampleRates.size() == 0 ) {
872 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
873 errorText_ = errorStream_.str();
874 error( RtAudioError::WARNING );
875 return info;
876 }
877
878 // CoreAudio always uses 32-bit floating point data for PCM streams.
879 // Thus, any other "physical" formats supported by the device are of
880 // no interest to the client.
881 info.nativeFormats = RTAUDIO_FLOAT32;
882
883 if ( info.outputChannels > 0 )
884 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
885 if ( info.inputChannels > 0 )
886 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
887
888 info.probed = true;
889 return info;
890 }
891
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
898 void* infoPointer )
899 {
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
901
902 RtApiCore *object = (RtApiCore *) info->object;
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
905 else
906 return kAudioHardwareNoError;
907 }
908
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 UInt32 nAddresses,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
913 {
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
919 else
920 handle->xrun[0] = true;
921 }
922 }
923
924 return kAudioHardwareNoError;
925 }
926
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
930 void* ratePointer )
931 {
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
939 }
940
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
945 {
946 // Get device ID
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
951 return FAILURE;
952 }
953
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
957 return FAILURE;
958 }
959
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
969 return FAILURE;
970 }
971
972 AudioDeviceID id = deviceList[ device ];
973
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
977 isInput = true;
978 property.mScope = kAudioDevicePropertyScopeInput;
979 }
980 else
981 property.mScope = kAudioDevicePropertyScopeOutput;
982
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
985 dataSize = 0;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
991 return FAILURE;
992 }
993
994 // Allocate the AudioBufferList.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
998 return FAILURE;
999 }
1000
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1003 free( bufferList );
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1006 return FAILURE;
1007 }
1008
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1021
1022 // First check that the device supports the requested number of
1023 // channels.
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 free( bufferList );
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1032 return FAILURE;
1033 }
1034
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
1042 foundStream = true;
1043 break;
1044 }
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1047 }
1048
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1052 monoMode = true;
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1058 }
1059
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
1069 streamCount++;
1070 }
1071 }
1072
1073 free( bufferList );
1074
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
1084 return FAILURE;
1085 }
1086
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1101 return FAILURE;
1102 }
1103
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1110 return FAILURE;
1111 }
1112
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1115
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 pid_t hog_pid;
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
1125 return FAILURE;
1126 }
1127
1128 if ( hog_pid != getpid() ) {
1129 hog_pid = getpid();
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1134 return FAILURE;
1135 }
1136 }
1137 }
1138
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1147 return FAILURE;
1148 }
1149
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152
1153 // Set a property listener for the sample rate change
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1160 return FAILURE;
1161 }
1162
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1169 return FAILURE;
1170 }
1171
1172 // Now wait until the reported nominal rate is what we just set.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1177 usleep( 5000 );
1178 }
1179
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1186 return FAILURE;
1187 }
1188 }
1189
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1199 return FAILURE;
1200 }
1201
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1209 }
1210
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1214 }
1215
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1221 return FAILURE;
1222 }
1223 }
1224
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1231 return FAILURE;
1232 }
1233
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1239
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1244 UInt32 formatFlags;
1245
1246 // We'll try higher bit rates first and then work our way down.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1268 else
1269 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1279 break;
1280 }
1281 }
1282
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1286 return FAILURE;
1287 }
1288 } // done setting virtual/physical formats.
1289
1290 // Get the stream / device latency.
1291 UInt32 latency;
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 else {
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1301 }
1302 }
1303
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1308
1309 // From the CoreAudio documentation, PCM data must be supplied as
1310 // 32-bit floats.
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1335 }
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1338
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1342 try {
1343 handle = new CoreHandle;
1344 }
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1347 goto error;
1348 }
1349
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1352 goto error;
1353 }
1354 stream_.apiHandle = (void *) handle;
1355 }
1356 else
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1361
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1370 goto error;
1371 }
1372
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers". However, we can't do this if using multiple
1375 // streams.
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1384 }
1385 }
1386
1387 if ( makeBuffer ) {
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1393 goto error;
1394 }
1395 }
1396 }
1397
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1402
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1407 }
1408
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1412 else {
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 #else
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 #endif
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1422 goto error;
1423 }
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1426 else
1427 stream_.mode = mode;
1428 }
1429
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434
1435 return SUCCESS;
1436
1437 error:
1438 if ( handle ) {
1439 pthread_cond_destroy( &handle->condition );
1440 delete handle;
1441 stream_.apiHandle = 0;
1442 }
1443
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1448 }
1449 }
1450
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1454 }
1455
1456 stream_.state = STREAM_CLOSED;
1457 return FAILURE;
1458 }
1459
// Close the open stream: remove the xrun property listeners, stop any
// running devices, destroy/remove the IOProcs registered in
// probeDeviceOpen(), free all internal buffers, and release the
// CoreHandle.  Emits a WARNING (not an error) if no stream is open.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Output-side teardown (handle index 0); also covers the shared
  // device of a single-device duplex stream.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }
    }
    // NOTE(review): handle is dereferenced unguarded below even though
    // the listener removal above checks it -- assumes handle is always
    // non-NULL for an open stream; confirm against probeDeviceOpen().
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // Input-side teardown (handle index 1); skipped when duplex on a
  // single device, since that device was already handled above.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }
    }
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  // Release user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Release the shared device-side conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
1535
startStream(void)1536 void RtApiCore :: startStream( void )
1537 {
1538 verifyStream();
1539 if ( stream_.state == STREAM_RUNNING ) {
1540 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1541 error( RtAudioError::WARNING );
1542 return;
1543 }
1544
1545 #if defined( HAVE_GETTIMEOFDAY )
1546 gettimeofday( &stream_.lastTickTimestamp, NULL );
1547 #endif
1548
1549 OSStatus result = noErr;
1550 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552
1553 result = AudioDeviceStart( handle->id[0], callbackHandler );
1554 if ( result != noErr ) {
1555 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1556 errorText_ = errorStream_.str();
1557 goto unlock;
1558 }
1559 }
1560
1561 if ( stream_.mode == INPUT ||
1562 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563
1564 result = AudioDeviceStart( handle->id[1], callbackHandler );
1565 if ( result != noErr ) {
1566 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1567 errorText_ = errorStream_.str();
1568 goto unlock;
1569 }
1570 }
1571
1572 handle->drainCounter = 0;
1573 handle->internalDrain = false;
1574 stream_.state = STREAM_RUNNING;
1575
1576 unlock:
1577 if ( result == noErr ) return;
1578 error( RtAudioError::SYSTEM_ERROR );
1579 }
1580
// Stop a running CoreAudio stream. If output is active, first drain any
// pending output (blocking until the audio callback signals completion via
// the handle's condition variable), then stop the device callback procedure(s)
// with AudioDeviceStop(). On system error, errorText_ is set and
// error( RtAudioError::SYSTEM_ERROR ) is raised.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain is in progress yet: request one
    // (callbackEvent() zero-fills output while drainCounter > 1) and wait
    // for the callback to signal that output has been flushed.
    // NOTE(review): stream_.mutex does not appear to be locked in this
    // function before pthread_cond_wait() — confirm against the rest of
    // the implementation (POSIX requires the mutex to be held).
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // For duplex streams on a single device, the one AudioDeviceStop() above
  // suffices; a second device is stopped only when input uses its own device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1623
abortStream(void)1624 void RtApiCore :: abortStream( void )
1625 {
1626 verifyStream();
1627 if ( stream_.state == STREAM_STOPPED ) {
1628 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1629 error( RtAudioError::WARNING );
1630 return;
1631 }
1632
1633 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1634 handle->drainCounter = 2;
1635
1636 stopStream();
1637 }
1638
1639 // This function will be called by a spawned thread when the user
1640 // callback function signals that the stream should be stopped or
1641 // aborted. It is better to handle it this way because the
1642 // callbackEvent() function probably should return before the AudioDeviceStop()
1643 // function is called.
coreStopStream(void * ptr)1644 static void *coreStopStream( void *ptr )
1645 {
1646 CallbackInfo *info = (CallbackInfo *) ptr;
1647 RtApiCore *object = (RtApiCore *) info->object;
1648
1649 object->stopStream();
1650 pthread_exit( NULL );
1651 }
1652
// CoreAudio I/O-proc handler, called once per buffer cycle per device.
// Invokes the user callback for fresh data, copies/converts user output into
// the CoreAudio output buffer list, and copies/converts CoreAudio input into
// the user buffer. In DUPLEX mode with two distinct devices this function is
// called separately for each device; the user callback runs only on the
// output device's cycle. Returns SUCCESS normally, FAILURE if the stream is
// unexpectedly closed.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // drainCounter is incremented once per cycle while draining (see below),
  // so > 3 means enough zero-filled cycles have gone out.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was initiated by the user callback's return value: stop the
      // stream from a spawned thread so this callback can return first.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any xrun flags recorded for each direction.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort (no draining of output).
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a normal stop: begin draining pending output.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        // Convert into the intermediate device buffer first, then distribute.
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One mono CoreAudio stream per user channel: copy each channel's
        // contiguous block from the non-interleaved source buffer.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset: distance (in samples) between successive frames of one
        // channel in the source buffer.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Interleave/copy frame by frame into this stream's buffer.
          // NOTE(review): this inner loop variable shadows the outer
          // stream index `i`; behavior is correct but worth confirming.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One mono CoreAudio stream per user channel: gather each channel
        // into its contiguous block of the destination buffer.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset: distance (in samples) between successive frames of one
        // channel in the destination buffer.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // Copy frame by frame from this stream's buffer.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1915
getErrorCode(OSStatus code)1916 const char* RtApiCore :: getErrorCode( OSStatus code )
1917 {
1918 switch( code ) {
1919
1920 case kAudioHardwareNotRunningError:
1921 return "kAudioHardwareNotRunningError";
1922
1923 case kAudioHardwareUnspecifiedError:
1924 return "kAudioHardwareUnspecifiedError";
1925
1926 case kAudioHardwareUnknownPropertyError:
1927 return "kAudioHardwareUnknownPropertyError";
1928
1929 case kAudioHardwareBadPropertySizeError:
1930 return "kAudioHardwareBadPropertySizeError";
1931
1932 case kAudioHardwareIllegalOperationError:
1933 return "kAudioHardwareIllegalOperationError";
1934
1935 case kAudioHardwareBadObjectError:
1936 return "kAudioHardwareBadObjectError";
1937
1938 case kAudioHardwareBadDeviceError:
1939 return "kAudioHardwareBadDeviceError";
1940
1941 case kAudioHardwareBadStreamError:
1942 return "kAudioHardwareBadStreamError";
1943
1944 case kAudioHardwareUnsupportedOperationError:
1945 return "kAudioHardwareUnsupportedOperationError";
1946
1947 case kAudioDeviceUnsupportedFormatError:
1948 return "kAudioDeviceUnsupportedFormatError";
1949
1950 case kAudioDevicePermissionsError:
1951 return "kAudioDevicePermissionsError";
1952
1953 default:
1954 return "CoreAudio unknown error";
1955 }
1956 }
1957
1958 //******************** End of __MACOSX_CORE__ *********************//
1959 #endif
1960
1961 #if defined(__UNIX_JACK__)
1962
1963 // JACK is a low-latency audio server, originally written for the
1964 // GNU/Linux operating system and now also ported to OS-X. It can
1965 // connect a number of different applications to an audio device, as
1966 // well as allowing them to share audio between themselves.
1967 //
1968 // When using JACK with RtAudio, "devices" refer to JACK clients that
1969 // have ports connected to the server. The JACK server is typically
1970 // started in a terminal as follows:
1971 //
1972 // .jackd -d alsa -d hw:0
1973 //
1974 // or through an interface program such as qjackctl. Many of the
1975 // parameters normally set for a stream are fixed by the JACK server
1976 // and can be specified when the JACK server is started. In
1977 // particular,
1978 //
1979 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1980 //
1981 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1982 // frames, and number of buffers = 4. Once the server is running, it
1983 // is not possible to override these values. If the values are not
1984 // specified in the command-line, the JACK server uses default values.
1985 //
1986 // The JACK server does not have to be running when an instance of
1987 // RtApiJack is created, though the function getDeviceCount() will
1988 // report 0 devices found until JACK has been started. When no
1989 // devices are available (i.e., the JACK server is not running), a
1990 // stream cannot be opened.
1991
1992 #include <jack/jack.h>
1993 #include <unistd.h>
1994 #include <cstdio>
1995
1996 // A structure to hold various information related to the Jack API
1997 // implementation.
struct JackHandle {
  jack_client_t *client;        // connection (client handle) to the JACK server
  jack_port_t **ports[2];       // registered ports: [0] = output, [1] = input
  std::string deviceName[2];    // device (client-name) per direction
  bool xrun[2];                 // over/underflow flags set by the xrun callback
  pthread_cond_t condition;     // presumably signaled when output draining
                                // completes (as in the CoreAudio handle) — confirm
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Initialize all members to an inactive/cleared state.
  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
2010
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed to suppress JACK's internal error
// printing when RtAudio debugging is disabled.
static void jackSilentError( const char * ) {}
#endif
2014
// Constructor: streams default to auto-connecting their ports to the
// device's ports (shouldAutoconnect_); may be disabled later via the
// RTAUDIO_JACK_DONT_CONNECT stream-option flag.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2023
~RtApiJack()2024 RtApiJack :: ~RtApiJack()
2025 {
2026 if ( stream_.state != STREAM_CLOSED ) closeStream();
2027 }
2028
// Count the number of JACK "devices" — i.e., distinct client-name prefixes
// among all registered audio ports. Returns 0 when the JACK server is not
// running (or a client connection cannot be made).
unsigned int RtApiJack :: getDeviceCount( void )
{
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;

  const char **ports;
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nChannels ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        // Each new prefix marks another device (JACK client). Note the
        // compared prefix here keeps the colon (iColon + 1), whereas
        // getDeviceInfo() compares without it — equivalent for counting.
        port = port.substr( 0, iColon + 1 );
        if ( port != previousPort ) {
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nChannels] );  // port array is NULL-terminated
    free( ports );
  }

  jack_client_close( client );
  return nDevices;
}
2061
// Probe the given JACK "device" (client-name prefix) and fill in an
// RtAudio::DeviceInfo: channel counts, sample rate (fixed by the running
// JACK server), native format. info.probed is false on any failure.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          // The device index resolves to the Nth distinct client prefix.
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );  // port array is NULL-terminated
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  // Only the server's current rate is supported; it cannot be changed here.
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2154
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2155 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2156 {
2157 CallbackInfo *info = (CallbackInfo *) infoPointer;
2158
2159 RtApiJack *object = (RtApiJack *) info->object;
2160 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161
2162 return 0;
2163 }
2164
2165 // This function will be called by a spawned thread when the Jack
2166 // server signals that it is shutting down. It is necessary to handle
2167 // it this way because the jackShutdown() function must return before
2168 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2169 static void *jackCloseStream( void *ptr )
2170 {
2171 CallbackInfo *info = (CallbackInfo *) ptr;
2172 RtApiJack *object = (RtApiJack *) info->object;
2173
2174 object->closeStream();
2175
2176 pthread_exit( NULL );
2177 }
jackShutdown(void * infoPointer)2178 static void jackShutdown( void *infoPointer )
2179 {
2180 CallbackInfo *info = (CallbackInfo *) infoPointer;
2181 RtApiJack *object = (RtApiJack *) info->object;
2182
2183 // Check current stream state. If stopped, then we'll assume this
2184 // was called as a result of a call to RtApiJack::stopStream (the
2185 // deactivation of a client handle causes this function to be called).
2186 // If not, we'll assume the Jack server is shutting down or some
2187 // other problem occurred and we should close the stream.
2188 if ( object->isStreamRunning() == false ) return;
2189
2190 ThreadHandle threadId;
2191 pthread_create( &threadId, NULL, jackCloseStream, info );
2192 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2193 }
2194
jackXrun(void * infoPointer)2195 static int jackXrun( void *infoPointer )
2196 {
2197 JackHandle *handle = *((JackHandle **) infoPointer);
2198
2199 if ( handle->ports[0] ) handle->xrun[0] = true;
2200 if ( handle->ports[1] ) handle->xrun[1] = true;
2201
2202 return 0;
2203 }
2204
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2205 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2206 unsigned int firstChannel, unsigned int sampleRate,
2207 RtAudioFormat format, unsigned int *bufferSize,
2208 RtAudio::StreamOptions *options )
2209 {
2210 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2211
2212 // Look for jack server and try to become a client (only do once per stream).
2213 jack_client_t *client = 0;
2214 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2215 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2216 jack_status_t *status = NULL;
2217 if ( options && !options->streamName.empty() )
2218 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2219 else
2220 client = jack_client_open( "RtApiJack", jackoptions, status );
2221 if ( client == 0 ) {
2222 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2223 error( RtAudioError::WARNING );
2224 return FAILURE;
2225 }
2226 }
2227 else {
2228 // The handle must have been created on an earlier pass.
2229 client = handle->client;
2230 }
2231
2232 const char **ports;
2233 std::string port, previousPort, deviceName;
2234 unsigned int nPorts = 0, nDevices = 0;
2235 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2236 if ( ports ) {
2237 // Parse the port names up to the first colon (:).
2238 size_t iColon = 0;
2239 do {
2240 port = (char *) ports[ nPorts ];
2241 iColon = port.find(":");
2242 if ( iColon != std::string::npos ) {
2243 port = port.substr( 0, iColon );
2244 if ( port != previousPort ) {
2245 if ( nDevices == device ) deviceName = port;
2246 nDevices++;
2247 previousPort = port;
2248 }
2249 }
2250 } while ( ports[++nPorts] );
2251 free( ports );
2252 }
2253
2254 if ( device >= nDevices ) {
2255 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2256 return FAILURE;
2257 }
2258
2259 unsigned long flag = JackPortIsInput;
2260 if ( mode == INPUT ) flag = JackPortIsOutput;
2261
2262 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2263 // Count the available ports containing the client name as device
2264 // channels. Jack "input ports" equal RtAudio output channels.
2265 unsigned int nChannels = 0;
2266 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2267 if ( ports ) {
2268 while ( ports[ nChannels ] ) nChannels++;
2269 free( ports );
2270 }
2271 // Compare the jack ports for specified client to the requested number of channels.
2272 if ( nChannels < (channels + firstChannel) ) {
2273 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2274 errorText_ = errorStream_.str();
2275 return FAILURE;
2276 }
2277 }
2278
2279 // Check the jack server sample rate.
2280 unsigned int jackRate = jack_get_sample_rate( client );
2281 if ( sampleRate != jackRate ) {
2282 jack_client_close( client );
2283 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2284 errorText_ = errorStream_.str();
2285 return FAILURE;
2286 }
2287 stream_.sampleRate = jackRate;
2288
2289 // Get the latency of the JACK port.
2290 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2291 if ( ports[ firstChannel ] ) {
2292 // Added by Ge Wang
2293 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2294 // the range (usually the min and max are equal)
2295 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2296 // get the latency range
2297 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2298 // be optimistic, use the min!
2299 stream_.latency[mode] = latrange.min;
2300 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2301 }
2302 free( ports );
2303
2304 // The jack server always uses 32-bit floating-point data.
2305 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2306 stream_.userFormat = format;
2307
2308 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2309 else stream_.userInterleaved = true;
2310
2311 // Jack always uses non-interleaved buffers.
2312 stream_.deviceInterleaved[mode] = false;
2313
2314 // Jack always provides host byte-ordered data.
2315 stream_.doByteSwap[mode] = false;
2316
2317 // Get the buffer size. The buffer size and number of buffers
2318 // (periods) is set when the jack server is started.
2319 stream_.bufferSize = (int) jack_get_buffer_size( client );
2320 *bufferSize = stream_.bufferSize;
2321
2322 stream_.nDeviceChannels[mode] = channels;
2323 stream_.nUserChannels[mode] = channels;
2324
2325 // Set flags for buffer conversion.
2326 stream_.doConvertBuffer[mode] = false;
2327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2328 stream_.doConvertBuffer[mode] = true;
2329 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2330 stream_.nUserChannels[mode] > 1 )
2331 stream_.doConvertBuffer[mode] = true;
2332
2333 // Allocate our JackHandle structure for the stream.
2334 if ( handle == 0 ) {
2335 try {
2336 handle = new JackHandle;
2337 }
2338 catch ( std::bad_alloc& ) {
2339 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2340 goto error;
2341 }
2342
2343 if ( pthread_cond_init(&handle->condition, NULL) ) {
2344 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2345 goto error;
2346 }
2347 stream_.apiHandle = (void *) handle;
2348 handle->client = client;
2349 }
2350 handle->deviceName[mode] = deviceName;
2351
2352 // Allocate necessary internal buffers.
2353 unsigned long bufferBytes;
2354 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2355 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2356 if ( stream_.userBuffer[mode] == NULL ) {
2357 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2358 goto error;
2359 }
2360
2361 if ( stream_.doConvertBuffer[mode] ) {
2362
2363 bool makeBuffer = true;
2364 if ( mode == OUTPUT )
2365 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2366 else { // mode == INPUT
2367 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2368 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2369 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2370 if ( bufferBytes < bytesOut ) makeBuffer = false;
2371 }
2372 }
2373
2374 if ( makeBuffer ) {
2375 bufferBytes *= *bufferSize;
2376 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2377 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2378 if ( stream_.deviceBuffer == NULL ) {
2379 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2380 goto error;
2381 }
2382 }
2383 }
2384
2385 // Allocate memory for the Jack ports (channels) identifiers.
2386 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2387 if ( handle->ports[mode] == NULL ) {
2388 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2389 goto error;
2390 }
2391
2392 stream_.device[mode] = device;
2393 stream_.channelOffset[mode] = firstChannel;
2394 stream_.state = STREAM_STOPPED;
2395 stream_.callbackInfo.object = (void *) this;
2396
2397 if ( stream_.mode == OUTPUT && mode == INPUT )
2398 // We had already set up the stream for output.
2399 stream_.mode = DUPLEX;
2400 else {
2401 stream_.mode = mode;
2402 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2403 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2404 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2405 }
2406
2407 // Register our ports.
2408 char label[64];
2409 if ( mode == OUTPUT ) {
2410 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2411 snprintf( label, 64, "outport %d", i );
2412 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2413 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2414 }
2415 }
2416 else {
2417 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2418 snprintf( label, 64, "inport %d", i );
2419 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2420 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2421 }
2422 }
2423
2424 // Setup the buffer conversion information structure. We don't use
2425 // buffers to do channel offsets, so we override that parameter
2426 // here.
2427 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2428
2429 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2430
2431 return SUCCESS;
2432
2433 error:
2434 if ( handle ) {
2435 pthread_cond_destroy( &handle->condition );
2436 jack_client_close( handle->client );
2437
2438 if ( handle->ports[0] ) free( handle->ports[0] );
2439 if ( handle->ports[1] ) free( handle->ports[1] );
2440
2441 delete handle;
2442 stream_.apiHandle = 0;
2443 }
2444
2445 for ( int i=0; i<2; i++ ) {
2446 if ( stream_.userBuffer[i] ) {
2447 free( stream_.userBuffer[i] );
2448 stream_.userBuffer[i] = 0;
2449 }
2450 }
2451
2452 if ( stream_.deviceBuffer ) {
2453 free( stream_.deviceBuffer );
2454 stream_.deviceBuffer = 0;
2455 }
2456
2457 return FAILURE;
2458 }
2459
closeStream(void)2460 void RtApiJack :: closeStream( void )
2461 {
2462 if ( stream_.state == STREAM_CLOSED ) {
2463 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2464 error( RtAudioError::WARNING );
2465 return;
2466 }
2467
2468 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2469 if ( handle ) {
2470
2471 if ( stream_.state == STREAM_RUNNING )
2472 jack_deactivate( handle->client );
2473
2474 jack_client_close( handle->client );
2475 }
2476
2477 if ( handle ) {
2478 if ( handle->ports[0] ) free( handle->ports[0] );
2479 if ( handle->ports[1] ) free( handle->ports[1] );
2480 pthread_cond_destroy( &handle->condition );
2481 delete handle;
2482 stream_.apiHandle = 0;
2483 }
2484
2485 for ( int i=0; i<2; i++ ) {
2486 if ( stream_.userBuffer[i] ) {
2487 free( stream_.userBuffer[i] );
2488 stream_.userBuffer[i] = 0;
2489 }
2490 }
2491
2492 if ( stream_.deviceBuffer ) {
2493 free( stream_.deviceBuffer );
2494 stream_.deviceBuffer = 0;
2495 }
2496
2497 stream_.mode = UNINITIALIZED;
2498 stream_.state = STREAM_CLOSED;
2499 }
2500
// Activate the JACK client and, unless the user passed the
// RTAUDIO_JACK_DONT_CONNECT flag (see shouldAutoconnect_), connect our
// registered ports to the target device's ports, honoring the channel
// offset recorded at open time.  On any failure we jump to "unlock"
// and report a SYSTEM_ERROR.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    // Our output ports feed the device's *input* ports.
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    // Our input ports are fed by the device's *output* ports.
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2578
// Stop the stream, first letting any pending output drain.  If the
// user callback has not already initiated a drain, we request one
// (drainCounter = 2) and block until callbackEvent() signals the
// condition variable, then deactivate the JACK client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires the mutex passed to
      // pthread_cond_wait() to be locked by the caller; no lock of
      // stream_.mutex is visible here -- confirm against the locking
      // convention used by the other backends in this file.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2600
abortStream(void)2601 void RtApiJack :: abortStream( void )
2602 {
2603 verifyStream();
2604 if ( stream_.state == STREAM_STOPPED ) {
2605 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2606 error( RtAudioError::WARNING );
2607 return;
2608 }
2609
2610 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2611 handle->drainCounter = 2;
2612
2613 stopStream();
2614 }
2615
2616 // This function will be called by a spawned thread when the user
2617 // callback function signals that the stream should be stopped or
2618 // aborted. It is necessary to handle it this way because the
2619 // callbackEvent() function must return before the jack_deactivate()
2620 // function will return.
jackStopStream(void * ptr)2621 static void *jackStopStream( void *ptr )
2622 {
2623 CallbackInfo *info = (CallbackInfo *) ptr;
2624 RtApiJack *object = (RtApiJack *) info->object;
2625
2626 object->stopStream();
2627 pthread_exit( NULL );
2628 }
2629
callbackEvent(unsigned long nframes)2630 bool RtApiJack :: callbackEvent( unsigned long nframes )
2631 {
2632 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2633 if ( stream_.state == STREAM_CLOSED ) {
2634 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2635 error( RtAudioError::WARNING );
2636 return FAILURE;
2637 }
2638 if ( stream_.bufferSize != nframes ) {
2639 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2640 error( RtAudioError::WARNING );
2641 return FAILURE;
2642 }
2643
2644 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2645 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2646
2647 // Check if we were draining the stream and signal is finished.
2648 if ( handle->drainCounter > 3 ) {
2649 ThreadHandle threadId;
2650
2651 stream_.state = STREAM_STOPPING;
2652 if ( handle->internalDrain == true )
2653 pthread_create( &threadId, NULL, jackStopStream, info );
2654 else
2655 pthread_cond_signal( &handle->condition );
2656 return SUCCESS;
2657 }
2658
2659 // Invoke user callback first, to get fresh output data.
2660 if ( handle->drainCounter == 0 ) {
2661 RtAudioCallback callback = (RtAudioCallback) info->callback;
2662 double streamTime = getStreamTime();
2663 RtAudioStreamStatus status = 0;
2664 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2665 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2666 handle->xrun[0] = false;
2667 }
2668 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2669 status |= RTAUDIO_INPUT_OVERFLOW;
2670 handle->xrun[1] = false;
2671 }
2672 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2673 stream_.bufferSize, streamTime, status, info->userData );
2674 if ( cbReturnValue == 2 ) {
2675 stream_.state = STREAM_STOPPING;
2676 handle->drainCounter = 2;
2677 ThreadHandle id;
2678 pthread_create( &id, NULL, jackStopStream, info );
2679 return SUCCESS;
2680 }
2681 else if ( cbReturnValue == 1 ) {
2682 handle->drainCounter = 1;
2683 handle->internalDrain = true;
2684 }
2685 }
2686
2687 jack_default_audio_sample_t *jackbuffer;
2688 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2689 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2690
2691 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2692
2693 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2694 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2695 memset( jackbuffer, 0, bufferBytes );
2696 }
2697
2698 }
2699 else if ( stream_.doConvertBuffer[0] ) {
2700
2701 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2702
2703 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2704 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2705 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2706 }
2707 }
2708 else { // no buffer conversion
2709 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2710 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2711 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2712 }
2713 }
2714 }
2715
2716 // Don't bother draining input
2717 if ( handle->drainCounter ) {
2718 handle->drainCounter++;
2719 goto unlock;
2720 }
2721
2722 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2723
2724 if ( stream_.doConvertBuffer[1] ) {
2725 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2726 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2727 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2728 }
2729 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2730 }
2731 else { // no buffer conversion
2732 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2733 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2734 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2735 }
2736 }
2737 }
2738
2739 unlock:
2740 RtApi::tickStreamTime();
2741 return SUCCESS;
2742 }
2743 //******************** End of __UNIX_JACK__ *********************//
2744 #endif
2745
2746 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2747
2748 // The ASIO API is designed around a callback scheme, so this
2749 // implementation is similar to that used for OS-X CoreAudio and Linux
2750 // Jack. The primary constraint with ASIO is that it only allows
2751 // access to a single driver at a time. Thus, it is not possible to
2752 // have more than one simultaneous RtAudio stream.
2753 //
2754 // This implementation also requires a number of external ASIO files
2755 // and a few global variables. The ASIO callback scheme does not
2756 // allow for the passing of user data, so we must create a global
2757 // pointer to our callbackInfo structure.
2758 //
2759 // On unix systems, we make use of a pthread condition variable.
2760 // Since there is no equivalent in Windows, I hacked something based
2761 // on information found in
2762 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2763
2764 #include "asiosys.h"
2765 #include "asio.h"
2766 #include "iasiothiscallresolver.h"
2767 #include "asiodrivers.h"
2768 #include <cmath>
2769
// File-scope state shared with the ASIO driver callbacks.  The ASIO
// callback interface carries no user-data pointer, so the stream's
// CallbackInfo must be reachable through a global.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;   // points at stream_.callbackInfo of the open stream
static bool asioXRun;                    // over/underrun flag (set by driver notification code — confirm in asioMessages)
2775
2776 struct AsioHandle {
2777 int drainCounter; // Tracks callback counts when draining
2778 bool internalDrain; // Indicates if stop is initiated from callback or not.
2779 ASIOBufferInfo *bufferInfos;
2780 HANDLE condition;
2781
AsioHandleAsioHandle2782 AsioHandle()
2783 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2784 };
2785
2786 // Function declarations (definitions at end of section)
2787 static const char* getAsioErrorString( ASIOError result );
2788 static void sampleRateChanged( ASIOSampleRate sRate );
2789 static long asioMessages( long selector, long value, void* message, double* opt );
2790
RtApiAsio()2791 RtApiAsio :: RtApiAsio()
2792 {
2793 // ASIO cannot run on a multi-threaded appartment. You can call
2794 // CoInitialize beforehand, but it must be for appartment threading
2795 // (in which case, CoInitilialize will return S_FALSE here).
2796 coInitialized_ = false;
2797 HRESULT hr = CoInitialize( NULL );
2798 if ( FAILED(hr) ) {
2799 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2800 error( RtAudioError::WARNING );
2801 }
2802 coInitialized_ = true;
2803
2804 drivers.removeCurrentDriver();
2805 driverInfo.asioVersion = 2;
2806
2807 // See note in DirectSound implementation about GetDesktopWindow().
2808 driverInfo.sysRef = GetForegroundWindow();
2809 }
2810
~RtApiAsio()2811 RtApiAsio :: ~RtApiAsio()
2812 {
2813 if ( stream_.state != STREAM_CLOSED ) closeStream();
2814 if ( coInitialized_ ) CoUninitialize();
2815 }
2816
getDeviceCount(void)2817 unsigned int RtApiAsio :: getDeviceCount( void )
2818 {
2819 return (unsigned int) drivers.asioGetNumDev();
2820 }
2821
// Probe the capabilities of one ASIO device: channel counts, supported
// sample rates, and native data format.  Requires loading/initializing
// the device's driver, so while a stream is open (only one ASIO driver
// may be active) the results cached by saveDeviceInfo() are returned.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
    }
  }

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Map the ASIO sample type onto the corresponding RtAudio format flag.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  // Unload the driver so another device can be probed or opened.
  drivers.removeCurrentDriver();
  return info;
}
2940
bufferSwitch(long index,ASIOBool)2941 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2942 {
2943 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2944 object->callbackEvent( index );
2945 }
2946
saveDeviceInfo(void)2947 void RtApiAsio :: saveDeviceInfo( void )
2948 {
2949 devices_.clear();
2950
2951 unsigned int nDevices = getDeviceCount();
2952 devices_.resize( nDevices );
2953 for ( unsigned int i=0; i<nDevices; i++ )
2954 devices_[i] = getDeviceInfo( i );
2955 }
2956
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2957 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2958 unsigned int firstChannel, unsigned int sampleRate,
2959 RtAudioFormat format, unsigned int *bufferSize,
2960 RtAudio::StreamOptions *options )
2961 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2962
2963 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2964
2965 // For ASIO, a duplex stream MUST use the same driver.
2966 if ( isDuplexInput && stream_.device[0] != device ) {
2967 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2968 return FAILURE;
2969 }
2970
2971 char driverName[32];
2972 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2973 if ( result != ASE_OK ) {
2974 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2975 errorText_ = errorStream_.str();
2976 return FAILURE;
2977 }
2978
2979 // Only load the driver once for duplex stream.
2980 if ( !isDuplexInput ) {
2981 // The getDeviceInfo() function will not work when a stream is open
2982 // because ASIO does not allow multiple devices to run at the same
2983 // time. Thus, we'll probe the system before opening a stream and
2984 // save the results for use by getDeviceInfo().
2985 this->saveDeviceInfo();
2986
2987 if ( !drivers.loadDriver( driverName ) ) {
2988 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2989 errorText_ = errorStream_.str();
2990 return FAILURE;
2991 }
2992
2993 result = ASIOInit( &driverInfo );
2994 if ( result != ASE_OK ) {
2995 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2996 errorText_ = errorStream_.str();
2997 return FAILURE;
2998 }
2999 }
3000
3001 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3002 bool buffersAllocated = false;
3003 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3004 unsigned int nChannels;
3005
3006
3007 // Check the device channel count.
3008 long inputChannels, outputChannels;
3009 result = ASIOGetChannels( &inputChannels, &outputChannels );
3010 if ( result != ASE_OK ) {
3011 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3012 errorText_ = errorStream_.str();
3013 goto error;
3014 }
3015
3016 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3017 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3018 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3019 errorText_ = errorStream_.str();
3020 goto error;
3021 }
3022 stream_.nDeviceChannels[mode] = channels;
3023 stream_.nUserChannels[mode] = channels;
3024 stream_.channelOffset[mode] = firstChannel;
3025
3026 // Verify the sample rate is supported.
3027 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3028 if ( result != ASE_OK ) {
3029 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3030 errorText_ = errorStream_.str();
3031 goto error;
3032 }
3033
3034 // Get the current sample rate
3035 ASIOSampleRate currentRate;
3036 result = ASIOGetSampleRate( ¤tRate );
3037 if ( result != ASE_OK ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3039 errorText_ = errorStream_.str();
3040 goto error;
3041 }
3042
3043 // Set the sample rate only if necessary
3044 if ( currentRate != sampleRate ) {
3045 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3046 if ( result != ASE_OK ) {
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3048 errorText_ = errorStream_.str();
3049 goto error;
3050 }
3051 }
3052
3053 // Determine the driver data type.
3054 ASIOChannelInfo channelInfo;
3055 channelInfo.channel = 0;
3056 if ( mode == OUTPUT ) channelInfo.isInput = false;
3057 else channelInfo.isInput = true;
3058 result = ASIOGetChannelInfo( &channelInfo );
3059 if ( result != ASE_OK ) {
3060 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3061 errorText_ = errorStream_.str();
3062 goto error;
3063 }
3064
3065 // Assuming WINDOWS host is always little-endian.
3066 stream_.doByteSwap[mode] = false;
3067 stream_.userFormat = format;
3068 stream_.deviceFormat[mode] = 0;
3069 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3071 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3072 }
3073 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3075 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3076 }
3077 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3079 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3080 }
3081 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3083 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3084 }
3085 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3086 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3087 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3088 }
3089
3090 if ( stream_.deviceFormat[mode] == 0 ) {
3091 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3092 errorText_ = errorStream_.str();
3093 goto error;
3094 }
3095
3096 // Set the buffer size. For a duplex stream, this will end up
3097 // setting the buffer size based on the input constraints, which
3098 // should be ok.
3099 long minSize, maxSize, preferSize, granularity;
3100 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3101 if ( result != ASE_OK ) {
3102 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3103 errorText_ = errorStream_.str();
3104 goto error;
3105 }
3106
3107 if ( isDuplexInput ) {
3108 // When this is the duplex input (output was opened before), then we have to use the same
3109 // buffersize as the output, because it might use the preferred buffer size, which most
3110 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3111 // So instead of throwing an error, make them equal. The caller uses the reference
3112 // to the "bufferSize" param as usual to set up processing buffers.
3113
3114 *bufferSize = stream_.bufferSize;
3115
3116 } else {
3117 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3118 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3119 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3120 else if ( granularity == -1 ) {
3121 // Make sure bufferSize is a power of two.
3122 int log2_of_min_size = 0;
3123 int log2_of_max_size = 0;
3124
3125 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3126 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3127 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3128 }
3129
3130 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3131 int min_delta_num = log2_of_min_size;
3132
3133 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3134 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3135 if (current_delta < min_delta) {
3136 min_delta = current_delta;
3137 min_delta_num = i;
3138 }
3139 }
3140
3141 *bufferSize = ( (unsigned int)1 << min_delta_num );
3142 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3143 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3144 }
3145 else if ( granularity != 0 ) {
3146 // Set to an even multiple of granularity, rounding up.
3147 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 }
3149 }
3150
3151 /*
3152 // we don't use it anymore, see above!
3153 // Just left it here for the case...
3154 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3155 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 goto error;
3157 }
3158 */
3159
3160 stream_.bufferSize = *bufferSize;
3161 stream_.nBuffers = 2;
3162
3163 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3164 else stream_.userInterleaved = true;
3165
3166 // ASIO always uses non-interleaved buffers.
3167 stream_.deviceInterleaved[mode] = false;
3168
3169 // Allocate, if necessary, our AsioHandle structure for the stream.
3170 if ( handle == 0 ) {
3171 try {
3172 handle = new AsioHandle;
3173 }
3174 catch ( std::bad_alloc& ) {
3175 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3176 goto error;
3177 }
3178 handle->bufferInfos = 0;
3179
3180 // Create a manual-reset event.
3181 handle->condition = CreateEvent( NULL, // no security
3182 TRUE, // manual-reset
3183 FALSE, // non-signaled initially
3184 NULL ); // unnamed
3185 stream_.apiHandle = (void *) handle;
3186 }
3187
3188 // Create the ASIO internal buffers. Since RtAudio sets up input
3189 // and output separately, we'll have to dispose of previously
3190 // created output buffers for a duplex stream.
3191 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3192 ASIODisposeBuffers();
3193 if ( handle->bufferInfos ) free( handle->bufferInfos );
3194 }
3195
3196 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3197 unsigned int i;
3198 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3199 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3200 if ( handle->bufferInfos == NULL ) {
3201 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3202 errorText_ = errorStream_.str();
3203 goto error;
3204 }
3205
3206 ASIOBufferInfo *infos;
3207 infos = handle->bufferInfos;
3208 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3209 infos->isInput = ASIOFalse;
3210 infos->channelNum = i + stream_.channelOffset[0];
3211 infos->buffers[0] = infos->buffers[1] = 0;
3212 }
3213 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3214 infos->isInput = ASIOTrue;
3215 infos->channelNum = i + stream_.channelOffset[1];
3216 infos->buffers[0] = infos->buffers[1] = 0;
3217 }
3218
3219 // prepare for callbacks
3220 stream_.sampleRate = sampleRate;
3221 stream_.device[mode] = device;
3222 stream_.mode = isDuplexInput ? DUPLEX : mode;
3223
3224 // store this class instance before registering callbacks, that are going to use it
3225 asioCallbackInfo = &stream_.callbackInfo;
3226 stream_.callbackInfo.object = (void *) this;
3227
3228 // Set up the ASIO callback structure and create the ASIO data buffers.
3229 asioCallbacks.bufferSwitch = &bufferSwitch;
3230 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3231 asioCallbacks.asioMessage = &asioMessages;
3232 asioCallbacks.bufferSwitchTimeInfo = NULL;
3233 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3234 if ( result != ASE_OK ) {
3235 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3236 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3237 // In that case, let's be naïve and try that instead.
3238 *bufferSize = preferSize;
3239 stream_.bufferSize = *bufferSize;
3240 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3241 }
3242
3243 if ( result != ASE_OK ) {
3244 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3245 errorText_ = errorStream_.str();
3246 goto error;
3247 }
3248 buffersAllocated = true;
3249 stream_.state = STREAM_STOPPED;
3250
3251 // Set flags for buffer conversion.
3252 stream_.doConvertBuffer[mode] = false;
3253 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3254 stream_.doConvertBuffer[mode] = true;
3255 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3256 stream_.nUserChannels[mode] > 1 )
3257 stream_.doConvertBuffer[mode] = true;
3258
3259 // Allocate necessary internal buffers
3260 unsigned long bufferBytes;
3261 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3262 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3263 if ( stream_.userBuffer[mode] == NULL ) {
3264 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3265 goto error;
3266 }
3267
3268 if ( stream_.doConvertBuffer[mode] ) {
3269
3270 bool makeBuffer = true;
3271 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3272 if ( isDuplexInput && stream_.deviceBuffer ) {
3273 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3274 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3275 }
3276
3277 if ( makeBuffer ) {
3278 bufferBytes *= *bufferSize;
3279 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3280 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3281 if ( stream_.deviceBuffer == NULL ) {
3282 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3283 goto error;
3284 }
3285 }
3286 }
3287
3288 // Determine device latencies
3289 long inputLatency, outputLatency;
3290 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3291 if ( result != ASE_OK ) {
3292 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3293 errorText_ = errorStream_.str();
3294 error( RtAudioError::WARNING); // warn but don't fail
3295 }
3296 else {
3297 stream_.latency[0] = outputLatency;
3298 stream_.latency[1] = inputLatency;
3299 }
3300
3301 // Setup the buffer conversion information structure. We don't use
3302 // buffers to do channel offsets, so we override that parameter
3303 // here.
3304 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3305
3306 return SUCCESS;
3307
3308 error:
3309 if ( !isDuplexInput ) {
3310 // the cleanup for error in the duplex input, is done by RtApi::openStream
3311 // So we clean up for single channel only
3312
3313 if ( buffersAllocated )
3314 ASIODisposeBuffers();
3315
3316 drivers.removeCurrentDriver();
3317
3318 if ( handle ) {
3319 CloseHandle( handle->condition );
3320 if ( handle->bufferInfos )
3321 free( handle->bufferInfos );
3322
3323 delete handle;
3324 stream_.apiHandle = 0;
3325 }
3326
3327
3328 if ( stream_.userBuffer[mode] ) {
3329 free( stream_.userBuffer[mode] );
3330 stream_.userBuffer[mode] = 0;
3331 }
3332
3333 if ( stream_.deviceBuffer ) {
3334 free( stream_.deviceBuffer );
3335 stream_.deviceBuffer = 0;
3336 }
3337 }
3338
3339 return FAILURE;
3340 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3341
closeStream()3342 void RtApiAsio :: closeStream()
3343 {
3344 if ( stream_.state == STREAM_CLOSED ) {
3345 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3346 error( RtAudioError::WARNING );
3347 return;
3348 }
3349
3350 if ( stream_.state == STREAM_RUNNING ) {
3351 stream_.state = STREAM_STOPPED;
3352 ASIOStop();
3353 }
3354 ASIODisposeBuffers();
3355 drivers.removeCurrentDriver();
3356
3357 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3358 if ( handle ) {
3359 CloseHandle( handle->condition );
3360 if ( handle->bufferInfos )
3361 free( handle->bufferInfos );
3362 delete handle;
3363 stream_.apiHandle = 0;
3364 }
3365
3366 for ( int i=0; i<2; i++ ) {
3367 if ( stream_.userBuffer[i] ) {
3368 free( stream_.userBuffer[i] );
3369 stream_.userBuffer[i] = 0;
3370 }
3371 }
3372
3373 if ( stream_.deviceBuffer ) {
3374 free( stream_.deviceBuffer );
3375 stream_.deviceBuffer = 0;
3376 }
3377
3378 stream_.mode = UNINITIALIZED;
3379 stream_.state = STREAM_CLOSED;
3380 }
3381
3382 bool stopThreadCalled = false;
3383
startStream()3384 void RtApiAsio :: startStream()
3385 {
3386 verifyStream();
3387 if ( stream_.state == STREAM_RUNNING ) {
3388 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3389 error( RtAudioError::WARNING );
3390 return;
3391 }
3392
3393 #if defined( HAVE_GETTIMEOFDAY )
3394 gettimeofday( &stream_.lastTickTimestamp, NULL );
3395 #endif
3396
3397 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3398 ASIOError result = ASIOStart();
3399 if ( result != ASE_OK ) {
3400 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3401 errorText_ = errorStream_.str();
3402 goto unlock;
3403 }
3404
3405 handle->drainCounter = 0;
3406 handle->internalDrain = false;
3407 ResetEvent( handle->condition );
3408 stream_.state = STREAM_RUNNING;
3409 asioXRun = false;
3410
3411 unlock:
3412 stopThreadCalled = false;
3413
3414 if ( result == ASE_OK ) return;
3415 error( RtAudioError::SYSTEM_ERROR );
3416 }
3417
stopStream()3418 void RtApiAsio :: stopStream()
3419 {
3420 verifyStream();
3421 if ( stream_.state == STREAM_STOPPED ) {
3422 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3423 error( RtAudioError::WARNING );
3424 return;
3425 }
3426
3427 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3429 if ( handle->drainCounter == 0 ) {
3430 handle->drainCounter = 2;
3431 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3432 }
3433 }
3434
3435 stream_.state = STREAM_STOPPED;
3436
3437 ASIOError result = ASIOStop();
3438 if ( result != ASE_OK ) {
3439 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3440 errorText_ = errorStream_.str();
3441 }
3442
3443 if ( result == ASE_OK ) return;
3444 error( RtAudioError::SYSTEM_ERROR );
3445 }
3446
abortStream()3447 void RtApiAsio :: abortStream()
3448 {
3449 verifyStream();
3450 if ( stream_.state == STREAM_STOPPED ) {
3451 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3452 error( RtAudioError::WARNING );
3453 return;
3454 }
3455
3456 // The following lines were commented-out because some behavior was
3457 // noted where the device buffers need to be zeroed to avoid
3458 // continuing sound, even when the device buffers are completely
3459 // disposed. So now, calling abort is the same as calling stop.
3460 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3461 // handle->drainCounter = 2;
3462 stopStream();
3463 }
3464
3465 // This function will be called by a spawned thread when the user
3466 // callback function signals that the stream should be stopped or
3467 // aborted. It is necessary to handle it this way because the
3468 // callbackEvent() function must return before the ASIOStop()
3469 // function will return.
asioStopStream(void * ptr)3470 static unsigned __stdcall asioStopStream( void *ptr )
3471 {
3472 CallbackInfo *info = (CallbackInfo *) ptr;
3473 RtApiAsio *object = (RtApiAsio *) info->object;
3474
3475 object->stopStream();
3476 _endthreadex( 0 );
3477 return 0;
3478 }
3479
//! Per-buffer driver callback. Fills the output half-buffer and/or harvests
//! the input half-buffer identified by bufferIndex, invoking the user
//! callback for fresh data, and drives the drain-to-stop state machine via
//! handle->drainCounter. Returns SUCCESS or FAILURE.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // A late buffer switch can arrive after stop was requested; ignore it.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it so it can call ASIOStop().
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // We cannot stop from inside this callback (ASIOStop() requires the
      // callback to have returned first), so hand off to asioStopStream().
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flagged by the driver since the last tick.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort: skip draining, stop via thread.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a stop after this buffer is drained.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // bufferBytes here is bytes per channel of output device data.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user format/interleaving into the device buffer, byte-swap if
      // needed, then de-interleave channel-by-channel into the ASIO buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No format conversion required: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    // Counter advances once per callback; once it exceeds 3 the block at the
    // top of this function signals/spawns the actual stop.
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // bufferBytes here is bytes per channel of input device data.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion: copy each input channel directly to the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3633
sampleRateChanged(ASIOSampleRate sRate)3634 static void sampleRateChanged( ASIOSampleRate sRate )
3635 {
3636 // The ASIO documentation says that this usually only happens during
3637 // external sync. Audio processing is not stopped by the driver,
3638 // actual sample rate might not have even changed, maybe only the
3639 // sample rate status of an AES/EBU or S/PDIF digital input at the
3640 // audio device.
3641
3642 RtApi *object = (RtApi *) asioCallbackInfo->object;
3643 try {
3644 object->stopStream();
3645 }
3646 catch ( RtAudioError &exception ) {
3647 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3648 return;
3649 }
3650
3651 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3652 }
3653
asioMessages(long selector,long value,void *,double *)3654 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3655 {
3656 long ret = 0;
3657
3658 switch( selector ) {
3659 case kAsioSelectorSupported:
3660 if ( value == kAsioResetRequest
3661 || value == kAsioEngineVersion
3662 || value == kAsioResyncRequest
3663 || value == kAsioLatenciesChanged
3664 // The following three were added for ASIO 2.0, you don't
3665 // necessarily have to support them.
3666 || value == kAsioSupportsTimeInfo
3667 || value == kAsioSupportsTimeCode
3668 || value == kAsioSupportsInputMonitor)
3669 ret = 1L;
3670 break;
3671 case kAsioResetRequest:
3672 // Defer the task and perform the reset of the driver during the
3673 // next "safe" situation. You cannot reset the driver right now,
3674 // as this code is called from the driver. Reset the driver is
3675 // done by completely destruct is. I.e. ASIOStop(),
3676 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3677 // driver again.
3678 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3679 ret = 1L;
3680 break;
3681 case kAsioResyncRequest:
3682 // This informs the application that the driver encountered some
3683 // non-fatal data loss. It is used for synchronization purposes
3684 // of different media. Added mainly to work around the Win16Mutex
3685 // problems in Windows 95/98 with the Windows Multimedia system,
3686 // which could lose data because the Mutex was held too long by
3687 // another thread. However a driver can issue it in other
3688 // situations, too.
3689 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3690 asioXRun = true;
3691 ret = 1L;
3692 break;
3693 case kAsioLatenciesChanged:
3694 // This will inform the host application that the drivers were
3695 // latencies changed. Beware, it this does not mean that the
3696 // buffer sizes have changed! You might need to update internal
3697 // delay data.
3698 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3699 ret = 1L;
3700 break;
3701 case kAsioEngineVersion:
3702 // Return the supported ASIO version of the host application. If
3703 // a host application does not implement this selector, ASIO 1.0
3704 // is assumed by the driver.
3705 ret = 2L;
3706 break;
3707 case kAsioSupportsTimeInfo:
3708 // Informs the driver whether the
3709 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3710 // For compatibility with ASIO 1.0 drivers the host application
3711 // should always support the "old" bufferSwitch method, too.
3712 ret = 0;
3713 break;
3714 case kAsioSupportsTimeCode:
3715 // Informs the driver whether application is interested in time
3716 // code info. If an application does not need to know about time
3717 // code, the driver has less work to do.
3718 ret = 0;
3719 break;
3720 }
3721 return ret;
3722 }
3723
getAsioErrorString(ASIOError result)3724 static const char* getAsioErrorString( ASIOError result )
3725 {
3726 struct Messages
3727 {
3728 ASIOError value;
3729 const char*message;
3730 };
3731
3732 static const Messages m[] =
3733 {
3734 { ASE_NotPresent, "Hardware input or output is not present or available." },
3735 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3736 { ASE_InvalidParameter, "Invalid input parameter." },
3737 { ASE_InvalidMode, "Invalid mode." },
3738 { ASE_SPNotAdvancing, "Sample position not advancing." },
3739 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3740 { ASE_NoMemory, "Not enough memory to complete the request." }
3741 };
3742
3743 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3744 if ( m[i].value == result ) return m[i].message;
3745
3746 return "Unknown error.";
3747 }
3748
3749 //******************** End of __WINDOWS_ASIO__ *********************//
3750 #endif
3751
3752
3753 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3754
3755 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3756 // - Introduces support for the Windows WASAPI API
3757 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3758 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3759 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3760
3761 #ifndef INITGUID
3762 #define INITGUID
3763 #endif
3764
3765 #include <mfapi.h>
3766 #include <mferror.h>
3767 #include <mfplay.h>
3768 #include <mftransform.h>
3769 #include <wmcodecdsp.h>
3770
3771 #include <audioclient.h>
3772 #include <avrt.h>
3773 #include <mmdeviceapi.h>
3774 #include <functiondiscoverykeys_devpkey.h>
3775
3776 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3777 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3778 #endif
3779
3780 #ifndef MFSTARTUP_NOSOCKET
3781 #define MFSTARTUP_NOSOCKET 0x1
3782 #endif
3783
3784 #ifdef _MSC_VER
3785 #pragma comment( lib, "ksuser" )
3786 #pragma comment( lib, "mfplat.lib" )
3787 #pragma comment( lib, "mfuuid.lib" )
3788 #pragma comment( lib, "wmcodecdspuuid" )
3789 #endif
3790
3791 //=============================================================================
3792
3793 #define SAFE_RELEASE( objectPtr )\
3794 if ( objectPtr )\
3795 {\
3796 objectPtr->Release();\
3797 objectPtr = NULL;\
3798 }
3799
3800 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3801
3802 //-----------------------------------------------------------------------------
3803
3804 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3805 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3806 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3807 // provide intermediate storage for read / write synchronization.
// Single-producer/single-consumer ring buffer of audio samples, used between
// HwIn->UserIn and UserOut->HwOut (see comment above). Capacity is
// bufferSize_ samples; inIndex_/outIndex_ are sample indices (not bytes) and
// wrap modulo bufferSize_. NOTE(review): not internally synchronized --
// presumably the callback loop serializes push/pull; verify at call sites.
class WasapiBuffer
{
public:
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}

  ~WasapiBuffer() {
    // free( NULL ) is a no-op, so an unset buffer is safe here.
    free( buffer_ );
  }

  // sets the length of the internal ring buffer
  // bufferSize is in samples, formatBytes is the size of one sample; any
  // previous contents are discarded and both indices reset.
  // NOTE(review): the calloc result is not checked for NULL.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    free( buffer_ );

    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }

  // attempt to push a buffer into the ring buffer at the current "in" index
  // bufferSize is in samples; format selects the per-sample element size for
  // the copy. Returns false (without copying) if there is not enough free
  // space, true on success.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // If the write range wraps past the end of storage, view the "out" index
    // on an unwrapped axis so a single comparison detects overlap below.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // the "IN" index CAN BEGIN at the "OUT" index
    // the "IN" index CANNOT END at the "OUT" index
    if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromZeroSize = samples that wrap around to the start of storage;
    // fromInSize   = samples written at inIndex_ before the wrap point.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // The destination pointer arithmetic is done in typed elements, so each
    // case scales inIndex_/fromInSize by the sample size automatically.
    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
    }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }

  // attempt to pull a buffer from the ring buffer from the current "out" index
  // Mirror image of pushBuffer(): copies bufferSize samples out of the ring
  // into buffer. Returns false (without copying) if that much data is not
  // available, true on success.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index when the read range wraps, as in pushBuffer().
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // the "OUT" index CANNOT BEGIN at the "IN" index
    // the "OUT" index CAN END at the "IN" index
    if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    // fromZeroSize = samples read from the start of storage after wrapping;
    // fromOutSize  = samples read at outIndex_ before the wrap point.
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }

private:
  char* buffer_;            // raw sample storage (calloc'd in setBufferSize)
  unsigned int bufferSize_; // capacity in samples
  unsigned int inIndex_;    // next write position, in samples
  unsigned int outIndex_;   // next read position, in samples
};
3962
3963 //-----------------------------------------------------------------------------
3964
3965 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3966 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3967 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Windows Media Foundation audio resampler DSP
// (CLSID_CResamplerMediaObject) to convert interleaved audio between the
// hardware sample rate and the user's rate. One instance handles a single
// direction with a fixed format / channel count; the numbered comments
// follow the MFT usage sequence (init, configure, feed, drain, shutdown).
class WasapiResampler
{
public:
  // isFloat       - true for float samples, false for PCM integer samples
  // bitsPerSample - sample width in bits (used for block-alignment math)
  // channelCount  - interleaved channel count
  // inSampleRate / outSampleRate - rates to convert between
  // NOTE(review): HRESULTs from the COM/MF calls below are not checked.
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    , _resamplerProps( NULL )
    #endif
  {
    // 1. Initialization

    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object

    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
    _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
    #endif

    // 3. Specify input / output format
    // _mediaType describes the input; the output type is a copy with only
    // the sample rate (and derived byte rate) changed.

    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
  }

  ~WasapiResampler()
  {
    // 8. Send stream stop messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // 9. Cleanup

    MFShutdown();

    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    SAFE_RELEASE( _resamplerProps );
    #endif
  }

  // Convert inSampleCount interleaved frames from inBuffer into outBuffer.
  // outSampleCount is set to the number of frames actually produced (0 when
  // the transform needs more input). Caller must size outBuffer for at
  // least ceil(inSampleCount * ratio) + 1 frames (see outputBufferSize).
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
  {
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    // Exact-ratio shortcut: equal rates mean a plain copy suffices.
    if ( _sampleRatio == 1 )
    {
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;
      return;
    }

    // One extra frame of headroom beyond the scaled size.
    unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data

    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler

    _transform->ProcessInput( 0, rInSample, 0 );

    // The sample holds its own reference to the buffer; drop ours.
    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion

    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
    DWORD rStatus;
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data

    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler

    // MF_E_TRANSFORM_NEED_MORE_INPUT: nothing produced yet; report 0 frames.
    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
    {
      outSampleCount = 0;
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );
      return;
    }

    // 7.3 Write output data to outBuffer

    // Re-acquire the (possibly replaced) buffer as one contiguous block.
    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes );

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  }

private:
  unsigned int _bytesPerSample; // bytes per sample per channel
  unsigned int _channelCount;   // interleaved channels per frame
  float _sampleRatio;           // outSampleRate / inSampleRate

  IUnknown* _transformUnk;        // raw IUnknown of the resampler MFT
  IMFTransform* _transform;       // resampler transform interface
  IMFMediaType* _mediaType;       // template type shared by input/output
  IMFMediaType* _inputMediaType;  // input-side media type
  IMFMediaType* _outputMediaType; // output-side media type (rate changed)

  #ifdef __IWMResamplerProps_FWD_DEFINED__
  IWMResamplerProps* _resamplerProps; // quality tuning (half filter length)
  #endif
};
4154
4155 //-----------------------------------------------------------------------------
4156
4157 // A structure to hold various information related to the WASAPI implementation.
4158 struct WasapiHandle
4159 {
4160 IAudioClient* captureAudioClient;
4161 IAudioClient* renderAudioClient;
4162 IAudioCaptureClient* captureClient;
4163 IAudioRenderClient* renderClient;
4164 HANDLE captureEvent;
4165 HANDLE renderEvent;
4166
WasapiHandleWasapiHandle4167 WasapiHandle()
4168 : captureAudioClient( NULL ),
4169 renderAudioClient( NULL ),
4170 captureClient( NULL ),
4171 renderClient( NULL ),
4172 captureEvent( NULL ),
4173 renderEvent( NULL ) {}
4174 };
4175
4176 //=============================================================================
4177
RtApiWasapi()4178 RtApiWasapi::RtApiWasapi()
4179 : coInitialized_( false ), deviceEnumerator_( NULL )
4180 {
4181 // WASAPI can run either apartment or multi-threaded
4182 HRESULT hr = CoInitialize( NULL );
4183 if ( !FAILED( hr ) )
4184 coInitialized_ = true;
4185
4186 // Instantiate device enumerator
4187 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4188 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4189 ( void** ) &deviceEnumerator_ );
4190
4191 // If this runs on an old Windows, it will fail. Ignore and proceed.
4192 if ( FAILED( hr ) )
4193 deviceEnumerator_ = NULL;
4194 }
4195
4196 //-----------------------------------------------------------------------------
4197
RtApiWasapi::~RtApiWasapi()
{
  // Tear down in dependency order: close any open stream first (closeStream()
  // also stops a running stream), then drop COM objects, then uninit COM.
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize() successfully, balance it;
  // otherwise CoUninitialize() would unbalance someone else's COM init.
  if ( coInitialized_ )
    CoUninitialize();
}
4209
4210 //=============================================================================
4211
getDeviceCount(void)4212 unsigned int RtApiWasapi::getDeviceCount( void )
4213 {
4214 unsigned int captureDeviceCount = 0;
4215 unsigned int renderDeviceCount = 0;
4216
4217 IMMDeviceCollection* captureDevices = NULL;
4218 IMMDeviceCollection* renderDevices = NULL;
4219
4220 if ( !deviceEnumerator_ )
4221 return 0;
4222
4223 // Count capture devices
4224 errorText_.clear();
4225 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4228 goto Exit;
4229 }
4230
4231 hr = captureDevices->GetCount( &captureDeviceCount );
4232 if ( FAILED( hr ) ) {
4233 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4234 goto Exit;
4235 }
4236
4237 // Count render devices
4238 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4241 goto Exit;
4242 }
4243
4244 hr = renderDevices->GetCount( &renderDeviceCount );
4245 if ( FAILED( hr ) ) {
4246 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4247 goto Exit;
4248 }
4249
4250 Exit:
4251 // release all references
4252 SAFE_RELEASE( captureDevices );
4253 SAFE_RELEASE( renderDevices );
4254
4255 if ( errorText_.empty() )
4256 return captureDeviceCount + renderDeviceCount;
4257
4258 error( RtAudioError::DRIVER_ERROR );
4259 return 0;
4260 }
4261
4262 //-----------------------------------------------------------------------------
4263
getDeviceInfo(unsigned int device)4264 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4265 {
4266 RtAudio::DeviceInfo info;
4267 unsigned int captureDeviceCount = 0;
4268 unsigned int renderDeviceCount = 0;
4269 std::string defaultDeviceName;
4270 bool isCaptureDevice = false;
4271
4272 PROPVARIANT deviceNameProp;
4273 PROPVARIANT defaultDeviceNameProp;
4274
4275 IMMDeviceCollection* captureDevices = NULL;
4276 IMMDeviceCollection* renderDevices = NULL;
4277 IMMDevice* devicePtr = NULL;
4278 IMMDevice* defaultDevicePtr = NULL;
4279 IAudioClient* audioClient = NULL;
4280 IPropertyStore* devicePropStore = NULL;
4281 IPropertyStore* defaultDevicePropStore = NULL;
4282
4283 WAVEFORMATEX* deviceFormat = NULL;
4284 WAVEFORMATEX* closestMatchFormat = NULL;
4285
4286 // probed
4287 info.probed = false;
4288
4289 // Count capture devices
4290 errorText_.clear();
4291 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4292 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4293 if ( FAILED( hr ) ) {
4294 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4295 goto Exit;
4296 }
4297
4298 hr = captureDevices->GetCount( &captureDeviceCount );
4299 if ( FAILED( hr ) ) {
4300 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4301 goto Exit;
4302 }
4303
4304 // Count render devices
4305 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4306 if ( FAILED( hr ) ) {
4307 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4308 goto Exit;
4309 }
4310
4311 hr = renderDevices->GetCount( &renderDeviceCount );
4312 if ( FAILED( hr ) ) {
4313 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4314 goto Exit;
4315 }
4316
4317 // validate device index
4318 if ( device >= captureDeviceCount + renderDeviceCount ) {
4319 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4320 errorType = RtAudioError::INVALID_USE;
4321 goto Exit;
4322 }
4323
4324 // determine whether index falls within capture or render devices
4325 if ( device >= renderDeviceCount ) {
4326 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4327 if ( FAILED( hr ) ) {
4328 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4329 goto Exit;
4330 }
4331 isCaptureDevice = true;
4332 }
4333 else {
4334 hr = renderDevices->Item( device, &devicePtr );
4335 if ( FAILED( hr ) ) {
4336 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4337 goto Exit;
4338 }
4339 isCaptureDevice = false;
4340 }
4341
4342 // get default device name
4343 if ( isCaptureDevice ) {
4344 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4345 if ( FAILED( hr ) ) {
4346 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4347 goto Exit;
4348 }
4349 }
4350 else {
4351 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4352 if ( FAILED( hr ) ) {
4353 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4354 goto Exit;
4355 }
4356 }
4357
4358 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4359 if ( FAILED( hr ) ) {
4360 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4361 goto Exit;
4362 }
4363 PropVariantInit( &defaultDeviceNameProp );
4364
4365 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4366 if ( FAILED( hr ) ) {
4367 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4368 goto Exit;
4369 }
4370
4371 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4372
4373 // name
4374 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4375 if ( FAILED( hr ) ) {
4376 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4377 goto Exit;
4378 }
4379
4380 PropVariantInit( &deviceNameProp );
4381
4382 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4383 if ( FAILED( hr ) ) {
4384 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4385 goto Exit;
4386 }
4387
4388 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4389
4390 // is default
4391 if ( isCaptureDevice ) {
4392 info.isDefaultInput = info.name == defaultDeviceName;
4393 info.isDefaultOutput = false;
4394 }
4395 else {
4396 info.isDefaultInput = false;
4397 info.isDefaultOutput = info.name == defaultDeviceName;
4398 }
4399
4400 // channel count
4401 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4404 goto Exit;
4405 }
4406
4407 hr = audioClient->GetMixFormat( &deviceFormat );
4408 if ( FAILED( hr ) ) {
4409 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4410 goto Exit;
4411 }
4412
4413 if ( isCaptureDevice ) {
4414 info.inputChannels = deviceFormat->nChannels;
4415 info.outputChannels = 0;
4416 info.duplexChannels = 0;
4417 }
4418 else {
4419 info.inputChannels = 0;
4420 info.outputChannels = deviceFormat->nChannels;
4421 info.duplexChannels = 0;
4422 }
4423
4424 // sample rates
4425 info.sampleRates.clear();
4426
4427 // allow support for all sample rates as we have a built-in sample rate converter
4428 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4429 info.sampleRates.push_back( SAMPLE_RATES[i] );
4430 }
4431 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4432
4433 // native format
4434 info.nativeFormats = 0;
4435
4436 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4437 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4438 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4439 {
4440 if ( deviceFormat->wBitsPerSample == 32 ) {
4441 info.nativeFormats |= RTAUDIO_FLOAT32;
4442 }
4443 else if ( deviceFormat->wBitsPerSample == 64 ) {
4444 info.nativeFormats |= RTAUDIO_FLOAT64;
4445 }
4446 }
4447 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4448 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4449 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4450 {
4451 if ( deviceFormat->wBitsPerSample == 8 ) {
4452 info.nativeFormats |= RTAUDIO_SINT8;
4453 }
4454 else if ( deviceFormat->wBitsPerSample == 16 ) {
4455 info.nativeFormats |= RTAUDIO_SINT16;
4456 }
4457 else if ( deviceFormat->wBitsPerSample == 24 ) {
4458 info.nativeFormats |= RTAUDIO_SINT24;
4459 }
4460 else if ( deviceFormat->wBitsPerSample == 32 ) {
4461 info.nativeFormats |= RTAUDIO_SINT32;
4462 }
4463 }
4464
4465 // probed
4466 info.probed = true;
4467
4468 Exit:
4469 // release all references
4470 PropVariantClear( &deviceNameProp );
4471 PropVariantClear( &defaultDeviceNameProp );
4472
4473 SAFE_RELEASE( captureDevices );
4474 SAFE_RELEASE( renderDevices );
4475 SAFE_RELEASE( devicePtr );
4476 SAFE_RELEASE( defaultDevicePtr );
4477 SAFE_RELEASE( audioClient );
4478 SAFE_RELEASE( devicePropStore );
4479 SAFE_RELEASE( defaultDevicePropStore );
4480
4481 CoTaskMemFree( deviceFormat );
4482 CoTaskMemFree( closestMatchFormat );
4483
4484 if ( !errorText_.empty() )
4485 error( errorType );
4486 return info;
4487 }
4488
4489 //-----------------------------------------------------------------------------
4490
getDefaultOutputDevice(void)4491 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4492 {
4493 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4494 if ( getDeviceInfo( i ).isDefaultOutput ) {
4495 return i;
4496 }
4497 }
4498
4499 return 0;
4500 }
4501
4502 //-----------------------------------------------------------------------------
4503
getDefaultInputDevice(void)4504 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4505 {
4506 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4507 if ( getDeviceInfo( i ).isDefaultInput ) {
4508 return i;
4509 }
4510 }
4511
4512 return 0;
4513 }
4514
4515 //-----------------------------------------------------------------------------
4516
closeStream(void)4517 void RtApiWasapi::closeStream( void )
4518 {
4519 if ( stream_.state == STREAM_CLOSED ) {
4520 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4521 error( RtAudioError::WARNING );
4522 return;
4523 }
4524
4525 if ( stream_.state != STREAM_STOPPED )
4526 stopStream();
4527
4528 // clean up stream memory
4529 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4530 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4531
4532 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4533 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4534
4535 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4536 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4537
4538 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4539 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4540
4541 delete ( WasapiHandle* ) stream_.apiHandle;
4542 stream_.apiHandle = NULL;
4543
4544 for ( int i = 0; i < 2; i++ ) {
4545 if ( stream_.userBuffer[i] ) {
4546 free( stream_.userBuffer[i] );
4547 stream_.userBuffer[i] = 0;
4548 }
4549 }
4550
4551 if ( stream_.deviceBuffer ) {
4552 free( stream_.deviceBuffer );
4553 stream_.deviceBuffer = 0;
4554 }
4555
4556 // update stream state
4557 stream_.state = STREAM_CLOSED;
4558 }
4559
4560 //-----------------------------------------------------------------------------
4561
startStream(void)4562 void RtApiWasapi::startStream( void )
4563 {
4564 verifyStream();
4565
4566 if ( stream_.state == STREAM_RUNNING ) {
4567 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4568 error( RtAudioError::WARNING );
4569 return;
4570 }
4571
4572 #if defined( HAVE_GETTIMEOFDAY )
4573 gettimeofday( &stream_.lastTickTimestamp, NULL );
4574 #endif
4575
4576 // update stream state
4577 stream_.state = STREAM_RUNNING;
4578
4579 // create WASAPI stream thread
4580 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4581
4582 if ( !stream_.callbackInfo.thread ) {
4583 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4584 error( RtAudioError::THREAD_ERROR );
4585 }
4586 else {
4587 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4588 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4589 }
4590 }
4591
4592 //-----------------------------------------------------------------------------
4593
void RtApiWasapi::stopStream( void )
{
  // verifyStream() is expected to report an error if no stream is open —
  // behavior defined elsewhere in this file; TODO confirm.
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (busy-wait; the stream thread is responsible for setting STREAM_STOPPED
  // on its way out)
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  // bufferSize / sampleRate is the buffer duration in seconds; * 1000
  // converts to the milliseconds Sleep() expects.
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4624
4625 //-----------------------------------------------------------------------------
4626
void RtApiWasapi::abortStream( void )
{
  // verifyStream() is expected to report an error if no stream is open —
  // behavior defined elsewhere in this file; TODO confirm.
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (busy-wait; unlike stopStream(), abort does NOT wait for the last buffer
  // to finish playing before closing the thread handle)
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4654
4655 //-----------------------------------------------------------------------------
4656
// Open (probe) a device for the given stream mode.  Device indices map render
// devices first, then capture devices.  Acquires the appropriate IAudioClient
// into the shared WasapiHandle and fills in the stream_ bookkeeping fields.
// Returns SUCCESS/FAILURE; on failure the stream is closed and error() raised.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
{
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
    goto Exit;
  }

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
    goto Exit;
  }

  // if device index falls within capture devices
  // (indices [0, renderDeviceCount) are render; the rest are capture)
  if ( device >= renderDeviceCount ) {
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
      goto Exit;
    }

    // retrieve captureAudioClient from devicePtr
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for loopback
  // (INPUT mode on a render device captures that device's output)
  if ( device < renderDeviceCount && mode == INPUT )
  {
    // if renderAudioClient is not initialised, initialise it now
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( !renderAudioClient )
    {
      // NOTE(review): the result of this recursive open is ignored; a failure
      // here is only surfaced indirectly by the steps below — confirm intended.
      probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
    }

    // retrieve captureAudioClient from devicePtr
    // (the render device's audio client is stored on the *capture* side,
    // since loopback reads the render endpoint as input)
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for output
  if ( device < renderDeviceCount && mode == OUTPUT )
  {
    // if renderAudioClient is already initialised, don't initialise it again
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( renderAudioClient )
    {
      methodResult = SUCCESS;
      goto Exit;
    }

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // fill stream data
  // (second call for the opposite direction promotes the stream to DUPLEX)
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
  }
  else {
    stream_.mode = mode;
  }

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  // NOTE(review): nativeFormats is a bitmask; this assignment assumes the
  // device reports a single native format — confirm.
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  else
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion.
  // (channel counts are compared for BOTH directions here, indices 0 and 1,
  // so a mismatch on either side triggers conversion)
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
       stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, 0 );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
    goto Exit;
  }

  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
  else
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

  methodResult = SUCCESS;

Exit:
  //clean up
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream
  if ( methodResult == FAILURE )
    closeStream();

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
}
4889
4890 //=============================================================================
4891
runWasapiThread(void * wasapiPtr)4892 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4893 {
4894 if ( wasapiPtr )
4895 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4896
4897 return 0;
4898 }
4899
stopWasapiThread(void * wasapiPtr)4900 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4901 {
4902 if ( wasapiPtr )
4903 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4904
4905 return 0;
4906 }
4907
abortWasapiThread(void * wasapiPtr)4908 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4909 {
4910 if ( wasapiPtr )
4911 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4912
4913 return 0;
4914 }
4915
4916 //-----------------------------------------------------------------------------
4917
wasapiThread()4918 void RtApiWasapi::wasapiThread()
4919 {
4920 // as this is a new thread, we must CoInitialize it
4921 CoInitialize( NULL );
4922
4923 HRESULT hr;
4924
4925 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4926 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4927 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4928 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4929 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4930 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4931
4932 WAVEFORMATEX* captureFormat = NULL;
4933 WAVEFORMATEX* renderFormat = NULL;
4934 float captureSrRatio = 0.0f;
4935 float renderSrRatio = 0.0f;
4936 WasapiBuffer captureBuffer;
4937 WasapiBuffer renderBuffer;
4938 WasapiResampler* captureResampler = NULL;
4939 WasapiResampler* renderResampler = NULL;
4940
4941 // declare local stream variables
4942 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4943 BYTE* streamBuffer = NULL;
4944 unsigned long captureFlags = 0;
4945 unsigned int bufferFrameCount = 0;
4946 unsigned int numFramesPadding = 0;
4947 unsigned int convBufferSize = 0;
4948 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4949 bool callbackPushed = true;
4950 bool callbackPulled = false;
4951 bool callbackStopped = false;
4952 int callbackResult = 0;
4953
4954 // convBuffer is used to store converted buffers between WASAPI and the user
4955 char* convBuffer = NULL;
4956 unsigned int convBuffSize = 0;
4957 unsigned int deviceBuffSize = 0;
4958
4959 std::string errorText;
4960 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4961
4962 // Attempt to assign "Pro Audio" characteristic to thread
4963 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4964 if ( AvrtDll ) {
4965 DWORD taskIndex = 0;
4966 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4967 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4968 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4969 FreeLibrary( AvrtDll );
4970 }
4971
4972 // start capture stream if applicable
4973 if ( captureAudioClient ) {
4974 hr = captureAudioClient->GetMixFormat( &captureFormat );
4975 if ( FAILED( hr ) ) {
4976 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4977 goto Exit;
4978 }
4979
4980 // init captureResampler
4981 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4982 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4983 captureFormat->nSamplesPerSec, stream_.sampleRate );
4984
4985 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4986
4987 if ( !captureClient ) {
4988 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4989 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4990 0,
4991 0,
4992 captureFormat,
4993 NULL );
4994 if ( FAILED( hr ) ) {
4995 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4996 goto Exit;
4997 }
4998
4999 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5000 ( void** ) &captureClient );
5001 if ( FAILED( hr ) ) {
5002 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5003 goto Exit;
5004 }
5005
5006 // don't configure captureEvent if in loopback mode
5007 if ( !loopbackEnabled )
5008 {
5009 // configure captureEvent to trigger on every available capture buffer
5010 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5011 if ( !captureEvent ) {
5012 errorType = RtAudioError::SYSTEM_ERROR;
5013 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5014 goto Exit;
5015 }
5016
5017 hr = captureAudioClient->SetEventHandle( captureEvent );
5018 if ( FAILED( hr ) ) {
5019 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5020 goto Exit;
5021 }
5022
5023 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5024 }
5025
5026 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5027
5028 // reset the capture stream
5029 hr = captureAudioClient->Reset();
5030 if ( FAILED( hr ) ) {
5031 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5032 goto Exit;
5033 }
5034
5035 // start the capture stream
5036 hr = captureAudioClient->Start();
5037 if ( FAILED( hr ) ) {
5038 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5039 goto Exit;
5040 }
5041 }
5042
5043 unsigned int inBufferSize = 0;
5044 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5045 if ( FAILED( hr ) ) {
5046 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5047 goto Exit;
5048 }
5049
5050 // scale outBufferSize according to stream->user sample rate ratio
5051 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5052 inBufferSize *= stream_.nDeviceChannels[INPUT];
5053
5054 // set captureBuffer size
5055 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5056 }
5057
5058 // start render stream if applicable
5059 if ( renderAudioClient ) {
5060 hr = renderAudioClient->GetMixFormat( &renderFormat );
5061 if ( FAILED( hr ) ) {
5062 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5063 goto Exit;
5064 }
5065
5066 // init renderResampler
5067 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5068 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5069 stream_.sampleRate, renderFormat->nSamplesPerSec );
5070
5071 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5072
5073 if ( !renderClient ) {
5074 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5075 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5076 0,
5077 0,
5078 renderFormat,
5079 NULL );
5080 if ( FAILED( hr ) ) {
5081 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5082 goto Exit;
5083 }
5084
5085 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5086 ( void** ) &renderClient );
5087 if ( FAILED( hr ) ) {
5088 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5089 goto Exit;
5090 }
5091
5092 // configure renderEvent to trigger on every available render buffer
5093 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5094 if ( !renderEvent ) {
5095 errorType = RtAudioError::SYSTEM_ERROR;
5096 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5097 goto Exit;
5098 }
5099
5100 hr = renderAudioClient->SetEventHandle( renderEvent );
5101 if ( FAILED( hr ) ) {
5102 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5103 goto Exit;
5104 }
5105
5106 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5107 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5108
5109 // reset the render stream
5110 hr = renderAudioClient->Reset();
5111 if ( FAILED( hr ) ) {
5112 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5113 goto Exit;
5114 }
5115
5116 // start the render stream
5117 hr = renderAudioClient->Start();
5118 if ( FAILED( hr ) ) {
5119 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5120 goto Exit;
5121 }
5122 }
5123
5124 unsigned int outBufferSize = 0;
5125 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5126 if ( FAILED( hr ) ) {
5127 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5128 goto Exit;
5129 }
5130
5131 // scale inBufferSize according to user->stream sample rate ratio
5132 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5133 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5134
5135 // set renderBuffer size
5136 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5137 }
5138
5139 // malloc buffer memory
5140 if ( stream_.mode == INPUT )
5141 {
5142 using namespace std; // for ceilf
5143 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5144 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5145 }
5146 else if ( stream_.mode == OUTPUT )
5147 {
5148 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5149 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5150 }
5151 else if ( stream_.mode == DUPLEX )
5152 {
5153 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5154 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5155 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5156 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5157 }
5158
5159 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5160 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5161 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5162 if ( !convBuffer || !stream_.deviceBuffer ) {
5163 errorType = RtAudioError::MEMORY_ERROR;
5164 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5165 goto Exit;
5166 }
5167
5168 // stream process loop
5169 while ( stream_.state != STREAM_STOPPING ) {
5170 if ( !callbackPulled ) {
5171 // Callback Input
5172 // ==============
5173 // 1. Pull callback buffer from inputBuffer
5174 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5175 // Convert callback buffer to user format
5176
5177 if ( captureAudioClient )
5178 {
5179 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5180 if ( captureSrRatio != 1 )
5181 {
5182 // account for remainders
5183 samplesToPull--;
5184 }
5185
5186 convBufferSize = 0;
5187 while ( convBufferSize < stream_.bufferSize )
5188 {
5189 // Pull callback buffer from inputBuffer
5190 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5191 samplesToPull * stream_.nDeviceChannels[INPUT],
5192 stream_.deviceFormat[INPUT] );
5193
5194 if ( !callbackPulled )
5195 {
5196 break;
5197 }
5198
5199 // Convert callback buffer to user sample rate
5200 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5201 unsigned int convSamples = 0;
5202
5203 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5204 convBuffer,
5205 samplesToPull,
5206 convSamples );
5207
5208 convBufferSize += convSamples;
5209 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5210 }
5211
5212 if ( callbackPulled )
5213 {
5214 if ( stream_.doConvertBuffer[INPUT] ) {
5215 // Convert callback buffer to user format
5216 convertBuffer( stream_.userBuffer[INPUT],
5217 stream_.deviceBuffer,
5218 stream_.convertInfo[INPUT] );
5219 }
5220 else {
5221 // no further conversion, simple copy deviceBuffer to userBuffer
5222 memcpy( stream_.userBuffer[INPUT],
5223 stream_.deviceBuffer,
5224 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5225 }
5226 }
5227 }
5228 else {
5229 // if there is no capture stream, set callbackPulled flag
5230 callbackPulled = true;
5231 }
5232
5233 // Execute Callback
5234 // ================
5235 // 1. Execute user callback method
5236 // 2. Handle return value from callback
5237
5238 // if callback has not requested the stream to stop
5239 if ( callbackPulled && !callbackStopped ) {
5240 // Execute user callback method
5241 callbackResult = callback( stream_.userBuffer[OUTPUT],
5242 stream_.userBuffer[INPUT],
5243 stream_.bufferSize,
5244 getStreamTime(),
5245 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5246 stream_.callbackInfo.userData );
5247
5248 // tick stream time
5249 RtApi::tickStreamTime();
5250
5251 // Handle return value from callback
5252 if ( callbackResult == 1 ) {
5253 // instantiate a thread to stop this thread
5254 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5255 if ( !threadHandle ) {
5256 errorType = RtAudioError::THREAD_ERROR;
5257 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5258 goto Exit;
5259 }
5260 else if ( !CloseHandle( threadHandle ) ) {
5261 errorType = RtAudioError::THREAD_ERROR;
5262 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5263 goto Exit;
5264 }
5265
5266 callbackStopped = true;
5267 }
5268 else if ( callbackResult == 2 ) {
5269 // instantiate a thread to stop this thread
5270 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5271 if ( !threadHandle ) {
5272 errorType = RtAudioError::THREAD_ERROR;
5273 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5274 goto Exit;
5275 }
5276 else if ( !CloseHandle( threadHandle ) ) {
5277 errorType = RtAudioError::THREAD_ERROR;
5278 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5279 goto Exit;
5280 }
5281
5282 callbackStopped = true;
5283 }
5284 }
5285 }
5286
5287 // Callback Output
5288 // ===============
5289 // 1. Convert callback buffer to stream format
5290 // 2. Convert callback buffer to stream sample rate and channel count
5291 // 3. Push callback buffer into outputBuffer
5292
5293 if ( renderAudioClient && callbackPulled )
5294 {
5295 // if the last call to renderBuffer.PushBuffer() was successful
5296 if ( callbackPushed || convBufferSize == 0 )
5297 {
5298 if ( stream_.doConvertBuffer[OUTPUT] )
5299 {
5300 // Convert callback buffer to stream format
5301 convertBuffer( stream_.deviceBuffer,
5302 stream_.userBuffer[OUTPUT],
5303 stream_.convertInfo[OUTPUT] );
5304
5305 }
5306 else {
5307 // no further conversion, simple copy userBuffer to deviceBuffer
5308 memcpy( stream_.deviceBuffer,
5309 stream_.userBuffer[OUTPUT],
5310 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5311 }
5312
5313 // Convert callback buffer to stream sample rate
5314 renderResampler->Convert( convBuffer,
5315 stream_.deviceBuffer,
5316 stream_.bufferSize,
5317 convBufferSize );
5318 }
5319
5320 // Push callback buffer into outputBuffer
5321 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5322 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5323 stream_.deviceFormat[OUTPUT] );
5324 }
5325 else {
5326 // if there is no render stream, set callbackPushed flag
5327 callbackPushed = true;
5328 }
5329
5330 // Stream Capture
5331 // ==============
5332 // 1. Get capture buffer from stream
5333 // 2. Push capture buffer into inputBuffer
5334 // 3. If 2. was successful: Release capture buffer
5335
5336 if ( captureAudioClient ) {
5337 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5338 if ( !callbackPulled ) {
5339 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5340 }
5341
5342 // Get capture buffer from stream
5343 hr = captureClient->GetBuffer( &streamBuffer,
5344 &bufferFrameCount,
5345 &captureFlags, NULL, NULL );
5346 if ( FAILED( hr ) ) {
5347 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5348 goto Exit;
5349 }
5350
5351 if ( bufferFrameCount != 0 ) {
5352 // Push capture buffer into inputBuffer
5353 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5354 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5355 stream_.deviceFormat[INPUT] ) )
5356 {
5357 // Release capture buffer
5358 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5359 if ( FAILED( hr ) ) {
5360 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5361 goto Exit;
5362 }
5363 }
5364 else
5365 {
5366 // Inform WASAPI that capture was unsuccessful
5367 hr = captureClient->ReleaseBuffer( 0 );
5368 if ( FAILED( hr ) ) {
5369 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5370 goto Exit;
5371 }
5372 }
5373 }
5374 else
5375 {
5376 // Inform WASAPI that capture was unsuccessful
5377 hr = captureClient->ReleaseBuffer( 0 );
5378 if ( FAILED( hr ) ) {
5379 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5380 goto Exit;
5381 }
5382 }
5383 }
5384
5385 // Stream Render
5386 // =============
5387 // 1. Get render buffer from stream
5388 // 2. Pull next buffer from outputBuffer
5389 // 3. If 2. was successful: Fill render buffer with next buffer
5390 // Release render buffer
5391
5392 if ( renderAudioClient ) {
5393 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5394 if ( callbackPulled && !callbackPushed ) {
5395 WaitForSingleObject( renderEvent, INFINITE );
5396 }
5397
5398 // Get render buffer from stream
5399 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5400 if ( FAILED( hr ) ) {
5401 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5402 goto Exit;
5403 }
5404
5405 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5406 if ( FAILED( hr ) ) {
5407 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5408 goto Exit;
5409 }
5410
5411 bufferFrameCount -= numFramesPadding;
5412
5413 if ( bufferFrameCount != 0 ) {
5414 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5415 if ( FAILED( hr ) ) {
5416 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5417 goto Exit;
5418 }
5419
5420 // Pull next buffer from outputBuffer
5421 // Fill render buffer with next buffer
5422 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5423 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5424 stream_.deviceFormat[OUTPUT] ) )
5425 {
5426 // Release render buffer
5427 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5430 goto Exit;
5431 }
5432 }
5433 else
5434 {
5435 // Inform WASAPI that render was unsuccessful
5436 hr = renderClient->ReleaseBuffer( 0, 0 );
5437 if ( FAILED( hr ) ) {
5438 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5439 goto Exit;
5440 }
5441 }
5442 }
5443 else
5444 {
5445 // Inform WASAPI that render was unsuccessful
5446 hr = renderClient->ReleaseBuffer( 0, 0 );
5447 if ( FAILED( hr ) ) {
5448 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5449 goto Exit;
5450 }
5451 }
5452 }
5453
5454 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5455 if ( callbackPushed ) {
5456 // unsetting the callbackPulled flag lets the stream know that
5457 // the audio device is ready for another callback output buffer.
5458 callbackPulled = false;
5459 }
5460
5461 }
5462
5463 Exit:
5464 // clean up
5465 CoTaskMemFree( captureFormat );
5466 CoTaskMemFree( renderFormat );
5467
5468 free ( convBuffer );
5469 delete renderResampler;
5470 delete captureResampler;
5471
5472 CoUninitialize();
5473
5474 // update stream state
5475 stream_.state = STREAM_STOPPED;
5476
5477 if ( !errorText.empty() )
5478 {
5479 errorText_ = errorText;
5480 error( errorType );
5481 }
5482 }
5483
5484 //******************** End of __WINDOWS_WASAPI__ *********************//
5485 #endif
5486
5487
5488 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5489
5490 // Modified by Robin Davies, October 2005
5491 // - Improvements to DirectX pointer chasing.
5492 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5493 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5494 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5495 // Changed device query structure for RtAudio 4.0.7, January 2010
5496
5497 #include <windows.h>
5498 #include <process.h>
5499 #include <mmsystem.h>
5500 #include <mmreg.h>
5501 #include <dsound.h>
5502 #include <assert.h>
5503 #include <algorithm>
5504
5505 #if defined(__MINGW32__)
5506 // missing from latest mingw winapi
5507 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5508 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5509 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5510 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5511 #endif
5512
5513 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5514
5515 #ifdef _MSC_VER // if Microsoft Visual C++
5516 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5517 #endif
5518
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5519 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5520 {
5521 if ( pointer > bufferSize ) pointer -= bufferSize;
5522 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5523 if ( pointer < earlierPointer ) pointer += bufferSize;
5524 return pointer >= earlierPointer && pointer < laterPointer;
5525 }
5526
5527 // A structure to hold various information related to the DirectSound
5528 // API implementation.
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  void *id[2];               // DirectSound interface pointers; index [0] = output, [1] = input (matches DsDevice).
  void *buffer[2];           // DirectSound buffer pointers, indexed as above.
  bool xrun[2];              // Over/underrun flags, one per direction.
  UINT bufferPointer[2];     // Current byte offset within each DS buffer.
  DWORD dsBufferSize[2];     // Total size (bytes) of each DS buffer.
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition;          // NOTE(review): presumably a Win32 event used to signal the stream thread — confirm against stream code.

  // Zero all bookkeeping; interface/buffer pointers are filled in when
  // a stream is opened.
  DsHandle()
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
};
5543
5544 // Declarations for utility functions, callbacks, and structures
5545 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; collects
// devices into the DsProbeData context passed via lpContext.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR module,
                                          LPVOID lpContext );

// Translate a DirectSound error code into a human-readable string.
static const char* getErrorString( int code );

// Stream thread entry point (signature suitable for _beginthreadex).
static unsigned __stdcall callbackHandler( void *ptr );
5554
struct DsDevice {
  LPGUID id[2];     // Device GUIDs; [0] = output (DirectSoundCreate), [1] = input (DirectSoundCaptureCreate).
  bool validId[2];  // True when the corresponding GUID was supplied during enumeration.
  bool found;       // Set during (re)enumeration; devices not re-found are pruned from the list.
  std::string name; // Human-readable device description.

  DsDevice()
    : found(false) { validId[0] = false; validId[1] = false; }
};
5564
// Context passed to deviceQueryCallback() during device enumeration.
struct DsProbeData {
  bool isInput;                            // True when enumerating capture devices, false for output devices.
  std::vector<struct DsDevice>* dsDevices; // Device list to be updated by the enumeration callback.
};
5569
RtApiDs()5570 RtApiDs :: RtApiDs()
5571 {
5572 // Dsound will run both-threaded. If CoInitialize fails, then just
5573 // accept whatever the mainline chose for a threading model.
5574 coInitialized_ = false;
5575 HRESULT hr = CoInitialize( NULL );
5576 if ( !FAILED( hr ) ) coInitialized_ = true;
5577 }
5578
~RtApiDs()5579 RtApiDs :: ~RtApiDs()
5580 {
5581 if ( stream_.state != STREAM_CLOSED ) closeStream();
5582 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5583 }
5584
5585 // The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0; // DirectSound always lists the default output device first
}
5590
5591 // The DirectSound default input is always the first input device,
5592 // which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0; // the default input is the first capture device enumerated
}
5597
getDeviceCount(void)5598 unsigned int RtApiDs :: getDeviceCount( void )
5599 {
5600 // Set query flag for previously found devices to false, so that we
5601 // can check for any devices that have disappeared.
5602 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5603 dsDevices[i].found = false;
5604
5605 // Query DirectSound devices.
5606 struct DsProbeData probeInfo;
5607 probeInfo.isInput = false;
5608 probeInfo.dsDevices = &dsDevices;
5609 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5610 if ( FAILED( result ) ) {
5611 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5612 errorText_ = errorStream_.str();
5613 error( RtAudioError::WARNING );
5614 }
5615
5616 // Query DirectSoundCapture devices.
5617 probeInfo.isInput = true;
5618 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5619 if ( FAILED( result ) ) {
5620 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5621 errorText_ = errorStream_.str();
5622 error( RtAudioError::WARNING );
5623 }
5624
5625 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5626 for ( unsigned int i=0; i<dsDevices.size(); ) {
5627 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5628 else i++;
5629 }
5630
5631 return static_cast<unsigned int>(dsDevices.size());
5632 }
5633
// Probe device number 'device'.  A DirectSound device may expose an
// output interface (validId[0]), a capture interface (validId[1]), or
// both; each valid interface is opened briefly to query channel counts,
// supported sample rates and native data formats.  On any failure,
// info.probed remains false.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false; // stays false on every failure path

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No output interface for this device: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    // Output probe failed; warn and still attempt the capture probe.
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // If the device has no capture interface, the output probe is the whole story.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // Capture caps report supported (rate, format) pairs as WAVE_FORMAT_*
  // bit flags; translate them into native formats and a rate list.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // 16-bit support takes precedence over 8-bit when collecting rates.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // 16-bit support takes precedence over 8-bit when collecting rates.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5812
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5813 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5814 unsigned int firstChannel, unsigned int sampleRate,
5815 RtAudioFormat format, unsigned int *bufferSize,
5816 RtAudio::StreamOptions *options )
5817 {
5818 if ( channels + firstChannel > 2 ) {
5819 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5820 return FAILURE;
5821 }
5822
5823 size_t nDevices = dsDevices.size();
5824 if ( nDevices == 0 ) {
5825 // This should not happen because a check is made before this function is called.
5826 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5827 return FAILURE;
5828 }
5829
5830 if ( device >= nDevices ) {
5831 // This should not happen because a check is made before this function is called.
5832 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5833 return FAILURE;
5834 }
5835
5836 if ( mode == OUTPUT ) {
5837 if ( dsDevices[ device ].validId[0] == false ) {
5838 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5839 errorText_ = errorStream_.str();
5840 return FAILURE;
5841 }
5842 }
5843 else { // mode == INPUT
5844 if ( dsDevices[ device ].validId[1] == false ) {
5845 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5846 errorText_ = errorStream_.str();
5847 return FAILURE;
5848 }
5849 }
5850
5851 // According to a note in PortAudio, using GetDesktopWindow()
5852 // instead of GetForegroundWindow() is supposed to avoid problems
5853 // that occur when the application's window is not the foreground
5854 // window. Also, if the application window closes before the
5855 // DirectSound buffer, DirectSound can crash. In the past, I had
5856 // problems when using GetDesktopWindow() but it seems fine now
5857 // (January 2010). I'll leave it commented here.
5858 // HWND hWnd = GetForegroundWindow();
5859 HWND hWnd = GetDesktopWindow();
5860
5861 // Check the numberOfBuffers parameter and limit the lowest value to
5862 // two. This is a judgement call and a value of two is probably too
5863 // low for capture, but it should work for playback.
5864 int nBuffers = 0;
5865 if ( options ) nBuffers = options->numberOfBuffers;
5866 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5867 if ( nBuffers < 2 ) nBuffers = 3;
5868
5869 // Check the lower range of the user-specified buffer size and set
5870 // (arbitrarily) to a lower bound of 32.
5871 if ( *bufferSize < 32 ) *bufferSize = 32;
5872
5873 // Create the wave format structure. The data format setting will
5874 // be determined later.
5875 WAVEFORMATEX waveFormat;
5876 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5877 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5878 waveFormat.nChannels = channels + firstChannel;
5879 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5880
5881 // Determine the device buffer size. By default, we'll use the value
5882 // defined above (32K), but we will grow it to make allowances for
5883 // very large software buffer sizes.
5884 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5885 DWORD dsPointerLeadTime = 0;
5886
5887 void *ohandle = 0, *bhandle = 0;
5888 HRESULT result;
5889 if ( mode == OUTPUT ) {
5890
5891 LPDIRECTSOUND output;
5892 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5893 if ( FAILED( result ) ) {
5894 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5895 errorText_ = errorStream_.str();
5896 return FAILURE;
5897 }
5898
5899 DSCAPS outCaps;
5900 outCaps.dwSize = sizeof( outCaps );
5901 result = output->GetCaps( &outCaps );
5902 if ( FAILED( result ) ) {
5903 output->Release();
5904 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5905 errorText_ = errorStream_.str();
5906 return FAILURE;
5907 }
5908
5909 // Check channel information.
5910 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5911 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5912 errorText_ = errorStream_.str();
5913 return FAILURE;
5914 }
5915
5916 // Check format information. Use 16-bit format unless not
5917 // supported or user requests 8-bit.
5918 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5919 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5920 waveFormat.wBitsPerSample = 16;
5921 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5922 }
5923 else {
5924 waveFormat.wBitsPerSample = 8;
5925 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5926 }
5927 stream_.userFormat = format;
5928
5929 // Update wave format structure and buffer information.
5930 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5931 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5932 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5933
5934 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5935 while ( dsPointerLeadTime * 2U > dsBufferSize )
5936 dsBufferSize *= 2;
5937
5938 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5939 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5940 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5941 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5942 if ( FAILED( result ) ) {
5943 output->Release();
5944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5945 errorText_ = errorStream_.str();
5946 return FAILURE;
5947 }
5948
5949 // Even though we will write to the secondary buffer, we need to
5950 // access the primary buffer to set the correct output format
5951 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5952 // buffer description.
5953 DSBUFFERDESC bufferDescription;
5954 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5955 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5956 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5957
5958 // Obtain the primary buffer
5959 LPDIRECTSOUNDBUFFER buffer;
5960 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5961 if ( FAILED( result ) ) {
5962 output->Release();
5963 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5964 errorText_ = errorStream_.str();
5965 return FAILURE;
5966 }
5967
5968 // Set the primary DS buffer sound format.
5969 result = buffer->SetFormat( &waveFormat );
5970 if ( FAILED( result ) ) {
5971 output->Release();
5972 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5973 errorText_ = errorStream_.str();
5974 return FAILURE;
5975 }
5976
5977 // Setup the secondary DS buffer description.
5978 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5979 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5980 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5981 DSBCAPS_GLOBALFOCUS |
5982 DSBCAPS_GETCURRENTPOSITION2 |
5983 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5984 bufferDescription.dwBufferBytes = dsBufferSize;
5985 bufferDescription.lpwfxFormat = &waveFormat;
5986
5987 // Try to create the secondary DS buffer. If that doesn't work,
5988 // try to use software mixing. Otherwise, there's a problem.
5989 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5990 if ( FAILED( result ) ) {
5991 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5992 DSBCAPS_GLOBALFOCUS |
5993 DSBCAPS_GETCURRENTPOSITION2 |
5994 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5995 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5996 if ( FAILED( result ) ) {
5997 output->Release();
5998 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5999 errorText_ = errorStream_.str();
6000 return FAILURE;
6001 }
6002 }
6003
6004 // Get the buffer size ... might be different from what we specified.
6005 DSBCAPS dsbcaps;
6006 dsbcaps.dwSize = sizeof( DSBCAPS );
6007 result = buffer->GetCaps( &dsbcaps );
6008 if ( FAILED( result ) ) {
6009 output->Release();
6010 buffer->Release();
6011 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6012 errorText_ = errorStream_.str();
6013 return FAILURE;
6014 }
6015
6016 dsBufferSize = dsbcaps.dwBufferBytes;
6017
6018 // Lock the DS buffer
6019 LPVOID audioPtr;
6020 DWORD dataLen;
6021 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6022 if ( FAILED( result ) ) {
6023 output->Release();
6024 buffer->Release();
6025 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6026 errorText_ = errorStream_.str();
6027 return FAILURE;
6028 }
6029
6030 // Zero the DS buffer
6031 ZeroMemory( audioPtr, dataLen );
6032
6033 // Unlock the DS buffer
6034 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6035 if ( FAILED( result ) ) {
6036 output->Release();
6037 buffer->Release();
6038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6039 errorText_ = errorStream_.str();
6040 return FAILURE;
6041 }
6042
6043 ohandle = (void *) output;
6044 bhandle = (void *) buffer;
6045 }
6046
6047 if ( mode == INPUT ) {
6048
6049 LPDIRECTSOUNDCAPTURE input;
6050 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6051 if ( FAILED( result ) ) {
6052 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6053 errorText_ = errorStream_.str();
6054 return FAILURE;
6055 }
6056
6057 DSCCAPS inCaps;
6058 inCaps.dwSize = sizeof( inCaps );
6059 result = input->GetCaps( &inCaps );
6060 if ( FAILED( result ) ) {
6061 input->Release();
6062 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6063 errorText_ = errorStream_.str();
6064 return FAILURE;
6065 }
6066
6067 // Check channel information.
6068 if ( inCaps.dwChannels < channels + firstChannel ) {
6069 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6070 return FAILURE;
6071 }
6072
6073 // Check format information. Use 16-bit format unless user
6074 // requests 8-bit.
6075 DWORD deviceFormats;
6076 if ( channels + firstChannel == 2 ) {
6077 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6078 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6079 waveFormat.wBitsPerSample = 8;
6080 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6081 }
6082 else { // assume 16-bit is supported
6083 waveFormat.wBitsPerSample = 16;
6084 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6085 }
6086 }
6087 else { // channel == 1
6088 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6089 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6090 waveFormat.wBitsPerSample = 8;
6091 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6092 }
6093 else { // assume 16-bit is supported
6094 waveFormat.wBitsPerSample = 16;
6095 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6096 }
6097 }
6098 stream_.userFormat = format;
6099
6100 // Update wave format structure and buffer information.
6101 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6102 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6103 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6104
6105 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6106 while ( dsPointerLeadTime * 2U > dsBufferSize )
6107 dsBufferSize *= 2;
6108
6109 // Setup the secondary DS buffer description.
6110 DSCBUFFERDESC bufferDescription;
6111 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6112 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6113 bufferDescription.dwFlags = 0;
6114 bufferDescription.dwReserved = 0;
6115 bufferDescription.dwBufferBytes = dsBufferSize;
6116 bufferDescription.lpwfxFormat = &waveFormat;
6117
6118 // Create the capture buffer.
6119 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6120 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6121 if ( FAILED( result ) ) {
6122 input->Release();
6123 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6124 errorText_ = errorStream_.str();
6125 return FAILURE;
6126 }
6127
6128 // Get the buffer size ... might be different from what we specified.
6129 DSCBCAPS dscbcaps;
6130 dscbcaps.dwSize = sizeof( DSCBCAPS );
6131 result = buffer->GetCaps( &dscbcaps );
6132 if ( FAILED( result ) ) {
6133 input->Release();
6134 buffer->Release();
6135 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6136 errorText_ = errorStream_.str();
6137 return FAILURE;
6138 }
6139
6140 dsBufferSize = dscbcaps.dwBufferBytes;
6141
6142 // NOTE: We could have a problem here if this is a duplex stream
6143 // and the play and capture hardware buffer sizes are different
6144 // (I'm actually not sure if that is a problem or not).
6145 // Currently, we are not verifying that.
6146
6147 // Lock the capture buffer
6148 LPVOID audioPtr;
6149 DWORD dataLen;
6150 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6151 if ( FAILED( result ) ) {
6152 input->Release();
6153 buffer->Release();
6154 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6155 errorText_ = errorStream_.str();
6156 return FAILURE;
6157 }
6158
6159 // Zero the buffer
6160 ZeroMemory( audioPtr, dataLen );
6161
6162 // Unlock the buffer
6163 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6164 if ( FAILED( result ) ) {
6165 input->Release();
6166 buffer->Release();
6167 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6168 errorText_ = errorStream_.str();
6169 return FAILURE;
6170 }
6171
6172 ohandle = (void *) input;
6173 bhandle = (void *) buffer;
6174 }
6175
6176 // Set various stream parameters
6177 DsHandle *handle = 0;
6178 stream_.nDeviceChannels[mode] = channels + firstChannel;
6179 stream_.nUserChannels[mode] = channels;
6180 stream_.bufferSize = *bufferSize;
6181 stream_.channelOffset[mode] = firstChannel;
6182 stream_.deviceInterleaved[mode] = true;
6183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6184 else stream_.userInterleaved = true;
6185
6186 // Set flag for buffer conversion
6187 stream_.doConvertBuffer[mode] = false;
6188 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6189 stream_.doConvertBuffer[mode] = true;
6190 if (stream_.userFormat != stream_.deviceFormat[mode])
6191 stream_.doConvertBuffer[mode] = true;
6192 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6193 stream_.nUserChannels[mode] > 1 )
6194 stream_.doConvertBuffer[mode] = true;
6195
6196 // Allocate necessary internal buffers
6197 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6198 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6199 if ( stream_.userBuffer[mode] == NULL ) {
6200 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6201 goto error;
6202 }
6203
6204 if ( stream_.doConvertBuffer[mode] ) {
6205
6206 bool makeBuffer = true;
6207 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6208 if ( mode == INPUT ) {
6209 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6210 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6211 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6212 }
6213 }
6214
6215 if ( makeBuffer ) {
6216 bufferBytes *= *bufferSize;
6217 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6218 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6219 if ( stream_.deviceBuffer == NULL ) {
6220 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6221 goto error;
6222 }
6223 }
6224 }
6225
6226 // Allocate our DsHandle structures for the stream.
6227 if ( stream_.apiHandle == 0 ) {
6228 try {
6229 handle = new DsHandle;
6230 }
6231 catch ( std::bad_alloc& ) {
6232 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6233 goto error;
6234 }
6235
6236 // Create a manual-reset event.
6237 handle->condition = CreateEvent( NULL, // no security
6238 TRUE, // manual-reset
6239 FALSE, // non-signaled initially
6240 NULL ); // unnamed
6241 stream_.apiHandle = (void *) handle;
6242 }
6243 else
6244 handle = (DsHandle *) stream_.apiHandle;
6245 handle->id[mode] = ohandle;
6246 handle->buffer[mode] = bhandle;
6247 handle->dsBufferSize[mode] = dsBufferSize;
6248 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6249
6250 stream_.device[mode] = device;
6251 stream_.state = STREAM_STOPPED;
6252 if ( stream_.mode == OUTPUT && mode == INPUT )
6253 // We had already set up an output stream.
6254 stream_.mode = DUPLEX;
6255 else
6256 stream_.mode = mode;
6257 stream_.nBuffers = nBuffers;
6258 stream_.sampleRate = sampleRate;
6259
6260 // Setup the buffer conversion information structure.
6261 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6262
6263 // Setup the callback thread.
6264 if ( stream_.callbackInfo.isRunning == false ) {
6265 unsigned threadId;
6266 stream_.callbackInfo.isRunning = true;
6267 stream_.callbackInfo.object = (void *) this;
6268 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6269 &stream_.callbackInfo, 0, &threadId );
6270 if ( stream_.callbackInfo.thread == 0 ) {
6271 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6272 goto error;
6273 }
6274
6275 // Boost DS thread priority
6276 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6277 }
6278 return SUCCESS;
6279
6280 error:
6281 if ( handle ) {
6282 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6283 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6284 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6285 if ( buffer ) buffer->Release();
6286 object->Release();
6287 }
6288 if ( handle->buffer[1] ) {
6289 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6290 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6291 if ( buffer ) buffer->Release();
6292 object->Release();
6293 }
6294 CloseHandle( handle->condition );
6295 delete handle;
6296 stream_.apiHandle = 0;
6297 }
6298
6299 for ( int i=0; i<2; i++ ) {
6300 if ( stream_.userBuffer[i] ) {
6301 free( stream_.userBuffer[i] );
6302 stream_.userBuffer[i] = 0;
6303 }
6304 }
6305
6306 if ( stream_.deviceBuffer ) {
6307 free( stream_.deviceBuffer );
6308 stream_.deviceBuffer = 0;
6309 }
6310
6311 stream_.state = STREAM_CLOSED;
6312 return FAILURE;
6313 }
6314
closeStream()6315 void RtApiDs :: closeStream()
6316 {
6317 if ( stream_.state == STREAM_CLOSED ) {
6318 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6319 error( RtAudioError::WARNING );
6320 return;
6321 }
6322
6323 // Stop the callback thread.
6324 stream_.callbackInfo.isRunning = false;
6325 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6326 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6327
6328 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6329 if ( handle ) {
6330 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6331 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6332 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6333 if ( buffer ) {
6334 buffer->Stop();
6335 buffer->Release();
6336 }
6337 object->Release();
6338 }
6339 if ( handle->buffer[1] ) {
6340 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6341 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6342 if ( buffer ) {
6343 buffer->Stop();
6344 buffer->Release();
6345 }
6346 object->Release();
6347 }
6348 CloseHandle( handle->condition );
6349 delete handle;
6350 stream_.apiHandle = 0;
6351 }
6352
6353 for ( int i=0; i<2; i++ ) {
6354 if ( stream_.userBuffer[i] ) {
6355 free( stream_.userBuffer[i] );
6356 stream_.userBuffer[i] = 0;
6357 }
6358 }
6359
6360 if ( stream_.deviceBuffer ) {
6361 free( stream_.deviceBuffer );
6362 stream_.deviceBuffer = 0;
6363 }
6364
6365 stream_.mode = UNINITIALIZED;
6366 stream_.state = STREAM_CLOSED;
6367 }
6368
startStream()6369 void RtApiDs :: startStream()
6370 {
6371 verifyStream();
6372 if ( stream_.state == STREAM_RUNNING ) {
6373 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6374 error( RtAudioError::WARNING );
6375 return;
6376 }
6377
6378 #if defined( HAVE_GETTIMEOFDAY )
6379 gettimeofday( &stream_.lastTickTimestamp, NULL );
6380 #endif
6381
6382 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6383
6384 // Increase scheduler frequency on lesser windows (a side-effect of
6385 // increasing timer accuracy). On greater windows (Win2K or later),
6386 // this is already in effect.
6387 timeBeginPeriod( 1 );
6388
6389 buffersRolling = false;
6390 duplexPrerollBytes = 0;
6391
6392 if ( stream_.mode == DUPLEX ) {
6393 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6394 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6395 }
6396
6397 HRESULT result = 0;
6398 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6399
6400 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6401 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6402 if ( FAILED( result ) ) {
6403 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6404 errorText_ = errorStream_.str();
6405 goto unlock;
6406 }
6407 }
6408
6409 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6410
6411 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6412 result = buffer->Start( DSCBSTART_LOOPING );
6413 if ( FAILED( result ) ) {
6414 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6415 errorText_ = errorStream_.str();
6416 goto unlock;
6417 }
6418 }
6419
6420 handle->drainCounter = 0;
6421 handle->internalDrain = false;
6422 ResetEvent( handle->condition );
6423 stream_.state = STREAM_RUNNING;
6424
6425 unlock:
6426 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6427 }
6428
void RtApiDs :: stopStream()
{
  // Verify the stream is open; stopping an already-stopped stream is a warning.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // If no drain is in progress (drainCounter == 0), request one and wait
    // for the callback thread to signal that the output has played out.
    // abortStream() pre-sets drainCounter = 2 to skip this wait.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    // Lock out the callback thread before touching the DS buffers.
    // NOTE: the matching MUTEX_UNLOCK is at the 'unlock' label below.
    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already taken in the output branch
    // above; only lock here for an INPUT-only stream.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6532
abortStream()6533 void RtApiDs :: abortStream()
6534 {
6535 verifyStream();
6536 if ( stream_.state == STREAM_STOPPED ) {
6537 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6538 error( RtAudioError::WARNING );
6539 return;
6540 }
6541
6542 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6543 handle->drainCounter = 2;
6544
6545 stopStream();
6546 }
6547
callbackEvent()6548 void RtApiDs :: callbackEvent()
6549 {
6550 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6551 Sleep( 50 ); // sleep 50 milliseconds
6552 return;
6553 }
6554
6555 if ( stream_.state == STREAM_CLOSED ) {
6556 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6557 error( RtAudioError::WARNING );
6558 return;
6559 }
6560
6561 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6562 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6563
6564 // Check if we were draining the stream and signal is finished.
6565 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6566
6567 stream_.state = STREAM_STOPPING;
6568 if ( handle->internalDrain == false )
6569 SetEvent( handle->condition );
6570 else
6571 stopStream();
6572 return;
6573 }
6574
6575 // Invoke user callback to get fresh output data UNLESS we are
6576 // draining stream.
6577 if ( handle->drainCounter == 0 ) {
6578 RtAudioCallback callback = (RtAudioCallback) info->callback;
6579 double streamTime = getStreamTime();
6580 RtAudioStreamStatus status = 0;
6581 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6582 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6583 handle->xrun[0] = false;
6584 }
6585 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6586 status |= RTAUDIO_INPUT_OVERFLOW;
6587 handle->xrun[1] = false;
6588 }
6589 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6590 stream_.bufferSize, streamTime, status, info->userData );
6591 if ( cbReturnValue == 2 ) {
6592 stream_.state = STREAM_STOPPING;
6593 handle->drainCounter = 2;
6594 abortStream();
6595 return;
6596 }
6597 else if ( cbReturnValue == 1 ) {
6598 handle->drainCounter = 1;
6599 handle->internalDrain = true;
6600 }
6601 }
6602
6603 HRESULT result;
6604 DWORD currentWritePointer, safeWritePointer;
6605 DWORD currentReadPointer, safeReadPointer;
6606 UINT nextWritePointer;
6607
6608 LPVOID buffer1 = NULL;
6609 LPVOID buffer2 = NULL;
6610 DWORD bufferSize1 = 0;
6611 DWORD bufferSize2 = 0;
6612
6613 char *buffer;
6614 long bufferBytes;
6615
6616 MUTEX_LOCK( &stream_.mutex );
6617 if ( stream_.state == STREAM_STOPPED ) {
6618 MUTEX_UNLOCK( &stream_.mutex );
6619 return;
6620 }
6621
6622 if ( buffersRolling == false ) {
6623 if ( stream_.mode == DUPLEX ) {
6624 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6625
6626 // It takes a while for the devices to get rolling. As a result,
6627 // there's no guarantee that the capture and write device pointers
6628 // will move in lockstep. Wait here for both devices to start
6629 // rolling, and then set our buffer pointers accordingly.
6630 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6631 // bytes later than the write buffer.
6632
6633 // Stub: a serious risk of having a pre-emptive scheduling round
6634 // take place between the two GetCurrentPosition calls... but I'm
6635 // really not sure how to solve the problem. Temporarily boost to
6636 // Realtime priority, maybe; but I'm not sure what priority the
6637 // DirectSound service threads run at. We *should* be roughly
6638 // within a ms or so of correct.
6639
6640 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6641 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6642
6643 DWORD startSafeWritePointer, startSafeReadPointer;
6644
6645 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6646 if ( FAILED( result ) ) {
6647 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6648 errorText_ = errorStream_.str();
6649 MUTEX_UNLOCK( &stream_.mutex );
6650 error( RtAudioError::SYSTEM_ERROR );
6651 return;
6652 }
6653 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6654 if ( FAILED( result ) ) {
6655 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6656 errorText_ = errorStream_.str();
6657 MUTEX_UNLOCK( &stream_.mutex );
6658 error( RtAudioError::SYSTEM_ERROR );
6659 return;
6660 }
6661 while ( true ) {
6662 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6663 if ( FAILED( result ) ) {
6664 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6665 errorText_ = errorStream_.str();
6666 MUTEX_UNLOCK( &stream_.mutex );
6667 error( RtAudioError::SYSTEM_ERROR );
6668 return;
6669 }
6670 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6671 if ( FAILED( result ) ) {
6672 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6673 errorText_ = errorStream_.str();
6674 MUTEX_UNLOCK( &stream_.mutex );
6675 error( RtAudioError::SYSTEM_ERROR );
6676 return;
6677 }
6678 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6679 Sleep( 1 );
6680 }
6681
6682 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6683
6684 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6685 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6686 handle->bufferPointer[1] = safeReadPointer;
6687 }
6688 else if ( stream_.mode == OUTPUT ) {
6689
6690 // Set the proper nextWritePosition after initial startup.
6691 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6692 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6693 if ( FAILED( result ) ) {
6694 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6695 errorText_ = errorStream_.str();
6696 MUTEX_UNLOCK( &stream_.mutex );
6697 error( RtAudioError::SYSTEM_ERROR );
6698 return;
6699 }
6700 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6701 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6702 }
6703
6704 buffersRolling = true;
6705 }
6706
6707 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6708
6709 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6710
6711 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6712 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6713 bufferBytes *= formatBytes( stream_.userFormat );
6714 memset( stream_.userBuffer[0], 0, bufferBytes );
6715 }
6716
6717 // Setup parameters and do buffer conversion if necessary.
6718 if ( stream_.doConvertBuffer[0] ) {
6719 buffer = stream_.deviceBuffer;
6720 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6721 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6722 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6723 }
6724 else {
6725 buffer = stream_.userBuffer[0];
6726 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6727 bufferBytes *= formatBytes( stream_.userFormat );
6728 }
6729
6730 // No byte swapping necessary in DirectSound implementation.
6731
6732 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6733 // unsigned. So, we need to convert our signed 8-bit data here to
6734 // unsigned.
6735 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6736 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6737
6738 DWORD dsBufferSize = handle->dsBufferSize[0];
6739 nextWritePointer = handle->bufferPointer[0];
6740
6741 DWORD endWrite, leadPointer;
6742 while ( true ) {
6743 // Find out where the read and "safe write" pointers are.
6744 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6745 if ( FAILED( result ) ) {
6746 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6747 errorText_ = errorStream_.str();
6748 MUTEX_UNLOCK( &stream_.mutex );
6749 error( RtAudioError::SYSTEM_ERROR );
6750 return;
6751 }
6752
6753 // We will copy our output buffer into the region between
6754 // safeWritePointer and leadPointer. If leadPointer is not
6755 // beyond the next endWrite position, wait until it is.
6756 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6757 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6758 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6759 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6760 endWrite = nextWritePointer + bufferBytes;
6761
6762 // Check whether the entire write region is behind the play pointer.
6763 if ( leadPointer >= endWrite ) break;
6764
6765 // If we are here, then we must wait until the leadPointer advances
6766 // beyond the end of our next write region. We use the
6767 // Sleep() function to suspend operation until that happens.
6768 double millis = ( endWrite - leadPointer ) * 1000.0;
6769 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6770 if ( millis < 1.0 ) millis = 1.0;
6771 Sleep( (DWORD) millis );
6772 }
6773
6774 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6775 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6776 // We've strayed into the forbidden zone ... resync the read pointer.
6777 handle->xrun[0] = true;
6778 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6779 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6780 handle->bufferPointer[0] = nextWritePointer;
6781 endWrite = nextWritePointer + bufferBytes;
6782 }
6783
6784 // Lock free space in the buffer
6785 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6786 &bufferSize1, &buffer2, &bufferSize2, 0 );
6787 if ( FAILED( result ) ) {
6788 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6789 errorText_ = errorStream_.str();
6790 MUTEX_UNLOCK( &stream_.mutex );
6791 error( RtAudioError::SYSTEM_ERROR );
6792 return;
6793 }
6794
6795 // Copy our buffer into the DS buffer
6796 CopyMemory( buffer1, buffer, bufferSize1 );
6797 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6798
6799 // Update our buffer offset and unlock sound buffer
6800 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6801 if ( FAILED( result ) ) {
6802 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6803 errorText_ = errorStream_.str();
6804 MUTEX_UNLOCK( &stream_.mutex );
6805 error( RtAudioError::SYSTEM_ERROR );
6806 return;
6807 }
6808 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6809 handle->bufferPointer[0] = nextWritePointer;
6810 }
6811
6812 // Don't bother draining input
6813 if ( handle->drainCounter ) {
6814 handle->drainCounter++;
6815 goto unlock;
6816 }
6817
6818 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6819
6820 // Setup parameters.
6821 if ( stream_.doConvertBuffer[1] ) {
6822 buffer = stream_.deviceBuffer;
6823 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6824 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6825 }
6826 else {
6827 buffer = stream_.userBuffer[1];
6828 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6829 bufferBytes *= formatBytes( stream_.userFormat );
6830 }
6831
6832 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6833 long nextReadPointer = handle->bufferPointer[1];
6834 DWORD dsBufferSize = handle->dsBufferSize[1];
6835
6836 // Find out where the write and "safe read" pointers are.
    result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6838 if ( FAILED( result ) ) {
6839 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6840 errorText_ = errorStream_.str();
6841 MUTEX_UNLOCK( &stream_.mutex );
6842 error( RtAudioError::SYSTEM_ERROR );
6843 return;
6844 }
6845
6846 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6847 DWORD endRead = nextReadPointer + bufferBytes;
6848
6849 // Handling depends on whether we are INPUT or DUPLEX.
6850 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6851 // then a wait here will drag the write pointers into the forbidden zone.
6852 //
6853 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6854 // it's in a safe position. This causes dropouts, but it seems to be the only
6855 // practical way to sync up the read and write pointers reliably, given the
6856 // the very complex relationship between phase and increment of the read and write
6857 // pointers.
6858 //
6859 // In order to minimize audible dropouts in DUPLEX mode, we will
6860 // provide a pre-roll period of 0.5 seconds in which we return
6861 // zeros from the read buffer while the pointers sync up.
6862
6863 if ( stream_.mode == DUPLEX ) {
6864 if ( safeReadPointer < endRead ) {
6865 if ( duplexPrerollBytes <= 0 ) {
6866 // Pre-roll time over. Be more agressive.
6867 int adjustment = endRead-safeReadPointer;
6868
6869 handle->xrun[1] = true;
6870 // Two cases:
6871 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6872 // and perform fine adjustments later.
6873 // - small adjustments: back off by twice as much.
6874 if ( adjustment >= 2*bufferBytes )
6875 nextReadPointer = safeReadPointer-2*bufferBytes;
6876 else
6877 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6878
6879 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6880
6881 }
6882 else {
6883 // In pre=roll time. Just do it.
6884 nextReadPointer = safeReadPointer - bufferBytes;
6885 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6886 }
6887 endRead = nextReadPointer + bufferBytes;
6888 }
6889 }
6890 else { // mode == INPUT
6891 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6892 // See comments for playback.
6893 double millis = (endRead - safeReadPointer) * 1000.0;
6894 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6895 if ( millis < 1.0 ) millis = 1.0;
6896 Sleep( (DWORD) millis );
6897
6898 // Wake up and find out where we are now.
        result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6900 if ( FAILED( result ) ) {
6901 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6902 errorText_ = errorStream_.str();
6903 MUTEX_UNLOCK( &stream_.mutex );
6904 error( RtAudioError::SYSTEM_ERROR );
6905 return;
6906 }
6907
6908 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6909 }
6910 }
6911
6912 // Lock free space in the buffer
6913 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6914 &bufferSize1, &buffer2, &bufferSize2, 0 );
6915 if ( FAILED( result ) ) {
6916 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6917 errorText_ = errorStream_.str();
6918 MUTEX_UNLOCK( &stream_.mutex );
6919 error( RtAudioError::SYSTEM_ERROR );
6920 return;
6921 }
6922
6923 if ( duplexPrerollBytes <= 0 ) {
6924 // Copy our buffer into the DS buffer
6925 CopyMemory( buffer, buffer1, bufferSize1 );
6926 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6927 }
6928 else {
6929 memset( buffer, 0, bufferSize1 );
6930 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6931 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6932 }
6933
6934 // Update our buffer offset and unlock sound buffer
6935 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6936 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6937 if ( FAILED( result ) ) {
6938 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6939 errorText_ = errorStream_.str();
6940 MUTEX_UNLOCK( &stream_.mutex );
6941 error( RtAudioError::SYSTEM_ERROR );
6942 return;
6943 }
6944 handle->bufferPointer[1] = nextReadPointer;
6945
6946 // No byte swapping necessary in DirectSound implementation.
6947
6948 // If necessary, convert 8-bit data from unsigned to signed.
6949 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6950 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6951
6952 // Do buffer conversion if necessary.
6953 if ( stream_.doConvertBuffer[1] )
6954 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6955 }
6956
6957 unlock:
6958 MUTEX_UNLOCK( &stream_.mutex );
6959 RtApi::tickStreamTime();
6960 }
6961
6962 // Definitions for utility functions and callbacks
6963 // specific to the DirectSound implementation.
6964
callbackHandler(void * ptr)6965 static unsigned __stdcall callbackHandler( void *ptr )
6966 {
6967 CallbackInfo *info = (CallbackInfo *) ptr;
6968 RtApiDs *object = (RtApiDs *) info->object;
6969 bool* isRunning = &info->isRunning;
6970
6971 while ( *isRunning == true ) {
6972 object->callbackEvent();
6973 }
6974
6975 _endthreadex( 0 );
6976 return 0;
6977 }
6978
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)6979 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6980 LPCTSTR description,
6981 LPCTSTR /*module*/,
6982 LPVOID lpContext )
6983 {
6984 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6985 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6986
6987 HRESULT hr;
6988 bool validDevice = false;
6989 if ( probeInfo.isInput == true ) {
6990 DSCCAPS caps;
6991 LPDIRECTSOUNDCAPTURE object;
6992
6993 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6994 if ( hr != DS_OK ) return TRUE;
6995
6996 caps.dwSize = sizeof(caps);
6997 hr = object->GetCaps( &caps );
6998 if ( hr == DS_OK ) {
6999 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7000 validDevice = true;
7001 }
7002 object->Release();
7003 }
7004 else {
7005 DSCAPS caps;
7006 LPDIRECTSOUND object;
7007 hr = DirectSoundCreate( lpguid, &object, NULL );
7008 if ( hr != DS_OK ) return TRUE;
7009
7010 caps.dwSize = sizeof(caps);
7011 hr = object->GetCaps( &caps );
7012 if ( hr == DS_OK ) {
7013 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7014 validDevice = true;
7015 }
7016 object->Release();
7017 }
7018
7019 // If good device, then save its name and guid.
7020 std::string name = convertCharPointerToStdString( description );
7021 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7022 if ( lpguid == NULL )
7023 name = "Default Device";
7024 if ( validDevice ) {
7025 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7026 if ( dsDevices[i].name == name ) {
7027 dsDevices[i].found = true;
7028 if ( probeInfo.isInput ) {
7029 dsDevices[i].id[1] = lpguid;
7030 dsDevices[i].validId[1] = true;
7031 }
7032 else {
7033 dsDevices[i].id[0] = lpguid;
7034 dsDevices[i].validId[0] = true;
7035 }
7036 return TRUE;
7037 }
7038 }
7039
7040 DsDevice device;
7041 device.name = name;
7042 device.found = true;
7043 if ( probeInfo.isInput ) {
7044 device.id[1] = lpguid;
7045 device.validId[1] = true;
7046 }
7047 else {
7048 device.id[0] = lpguid;
7049 device.validId[0] = true;
7050 }
7051 dsDevices.push_back( device );
7052 }
7053
7054 return TRUE;
7055 }
7056
getErrorString(int code)7057 static const char* getErrorString( int code )
7058 {
7059 switch ( code ) {
7060
7061 case DSERR_ALLOCATED:
7062 return "Already allocated";
7063
7064 case DSERR_CONTROLUNAVAIL:
7065 return "Control unavailable";
7066
7067 case DSERR_INVALIDPARAM:
7068 return "Invalid parameter";
7069
7070 case DSERR_INVALIDCALL:
7071 return "Invalid call";
7072
7073 case DSERR_GENERIC:
7074 return "Generic error";
7075
7076 case DSERR_PRIOLEVELNEEDED:
7077 return "Priority level needed";
7078
7079 case DSERR_OUTOFMEMORY:
7080 return "Out of memory";
7081
7082 case DSERR_BADFORMAT:
7083 return "The sample rate or the channel format is not supported";
7084
7085 case DSERR_UNSUPPORTED:
7086 return "Not supported";
7087
7088 case DSERR_NODRIVER:
7089 return "No driver";
7090
7091 case DSERR_ALREADYINITIALIZED:
7092 return "Already initialized";
7093
7094 case DSERR_NOAGGREGATION:
7095 return "No aggregation";
7096
7097 case DSERR_BUFFERLOST:
7098 return "Buffer lost";
7099
7100 case DSERR_OTHERAPPHASPRIO:
7101 return "Another application already has priority";
7102
7103 case DSERR_UNINITIALIZED:
7104 return "Uninitialized";
7105
7106 default:
7107 return "DirectSound unknown error";
7108 }
7109 }
7110 //******************** End of __WINDOWS_DS__ *********************//
7111 #endif
7112
7113
7114 #if defined(__LINUX_ALSA__)
7115
7116 #include <alsa/asoundlib.h>
7117 #include <unistd.h>
7118
7119 // A structure to hold various information related to the ALSA API
7120 // implementation.
7121 struct AlsaHandle {
7122 snd_pcm_t *handles[2];
7123 bool synchronized;
7124 bool xrun[2];
7125 pthread_cond_t runnable_cv;
7126 bool runnable;
7127
AlsaHandleAlsaHandle7128 AlsaHandle()
7129 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7130 };
7131
7132 static void *alsaCallbackHandler( void * ptr );
7133
// Default constructor: no ALSA resources are acquired until a stream is
// opened, so there is nothing to initialize here.
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
7138
// Destructor: ensure any still-open stream is closed (releasing its PCM
// handles and callback thread) before the API object is destroyed.
RtApiAlsa :: ~RtApiAlsa()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
7143
getDeviceCount(void)7144 unsigned int RtApiAlsa :: getDeviceCount( void )
7145 {
7146 unsigned nDevices = 0;
7147 int result, subdevice, card;
7148 char name[64];
7149 snd_ctl_t *handle = 0;
7150
7151 // Count cards and devices
7152 card = -1;
7153 snd_card_next( &card );
7154 while ( card >= 0 ) {
7155 sprintf( name, "hw:%d", card );
7156 result = snd_ctl_open( &handle, name, 0 );
7157 if ( result < 0 ) {
7158 handle = 0;
7159 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7160 errorText_ = errorStream_.str();
7161 error( RtAudioError::WARNING );
7162 goto nextcard;
7163 }
7164 subdevice = -1;
7165 while( 1 ) {
7166 result = snd_ctl_pcm_next_device( handle, &subdevice );
7167 if ( result < 0 ) {
7168 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7169 errorText_ = errorStream_.str();
7170 error( RtAudioError::WARNING );
7171 break;
7172 }
7173 if ( subdevice < 0 )
7174 break;
7175 nDevices++;
7176 }
7177 nextcard:
7178 if ( handle )
7179 snd_ctl_close( handle );
7180 snd_card_next( &card );
7181 }
7182
7183 result = snd_ctl_open( &handle, "default", 0 );
7184 if (result == 0) {
7185 nDevices++;
7186 snd_ctl_close( handle );
7187 }
7188
7189 return nDevices;
7190 }
7191
getDeviceInfo(unsigned int device)7192 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7193 {
7194 RtAudio::DeviceInfo info;
7195 info.probed = false;
7196
7197 unsigned nDevices = 0;
7198 int result, subdevice, card;
7199 char name[64];
7200 snd_ctl_t *chandle = 0;
7201
7202 // Count cards and devices
7203 card = -1;
7204 subdevice = -1;
7205 snd_card_next( &card );
7206 while ( card >= 0 ) {
7207 sprintf( name, "hw:%d", card );
7208 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7209 if ( result < 0 ) {
7210 chandle = 0;
7211 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7212 errorText_ = errorStream_.str();
7213 error( RtAudioError::WARNING );
7214 goto nextcard;
7215 }
7216 subdevice = -1;
7217 while( 1 ) {
7218 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7219 if ( result < 0 ) {
7220 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7221 errorText_ = errorStream_.str();
7222 error( RtAudioError::WARNING );
7223 break;
7224 }
7225 if ( subdevice < 0 ) break;
7226 if ( nDevices == device ) {
7227 sprintf( name, "hw:%d,%d", card, subdevice );
7228 goto foundDevice;
7229 }
7230 nDevices++;
7231 }
7232 nextcard:
7233 if ( chandle )
7234 snd_ctl_close( chandle );
7235 snd_card_next( &card );
7236 }
7237
7238 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7239 if ( result == 0 ) {
7240 if ( nDevices == device ) {
7241 strcpy( name, "default" );
7242 goto foundDevice;
7243 }
7244 nDevices++;
7245 }
7246
7247 if ( nDevices == 0 ) {
7248 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7249 error( RtAudioError::INVALID_USE );
7250 return info;
7251 }
7252
7253 if ( device >= nDevices ) {
7254 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7255 error( RtAudioError::INVALID_USE );
7256 return info;
7257 }
7258
7259 foundDevice:
7260
7261 // If a stream is already open, we cannot probe the stream devices.
7262 // Thus, use the saved results.
7263 if ( stream_.state != STREAM_CLOSED &&
7264 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7265 snd_ctl_close( chandle );
7266 if ( device >= devices_.size() ) {
7267 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7268 error( RtAudioError::WARNING );
7269 return info;
7270 }
7271 return devices_[ device ];
7272 }
7273
7274 int openMode = SND_PCM_ASYNC;
7275 snd_pcm_stream_t stream;
7276 snd_pcm_info_t *pcminfo;
7277 snd_pcm_info_alloca( &pcminfo );
7278 snd_pcm_t *phandle;
7279 snd_pcm_hw_params_t *params;
7280 snd_pcm_hw_params_alloca( ¶ms );
7281
7282 // First try for playback unless default device (which has subdev -1)
7283 stream = SND_PCM_STREAM_PLAYBACK;
7284 snd_pcm_info_set_stream( pcminfo, stream );
7285 if ( subdevice != -1 ) {
7286 snd_pcm_info_set_device( pcminfo, subdevice );
7287 snd_pcm_info_set_subdevice( pcminfo, 0 );
7288
7289 result = snd_ctl_pcm_info( chandle, pcminfo );
7290 if ( result < 0 ) {
7291 // Device probably doesn't support playback.
7292 goto captureProbe;
7293 }
7294 }
7295
7296 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7297 if ( result < 0 ) {
7298 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7299 errorText_ = errorStream_.str();
7300 error( RtAudioError::WARNING );
7301 goto captureProbe;
7302 }
7303
7304 // The device is open ... fill the parameter structure.
7305 result = snd_pcm_hw_params_any( phandle, params );
7306 if ( result < 0 ) {
7307 snd_pcm_close( phandle );
7308 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7309 errorText_ = errorStream_.str();
7310 error( RtAudioError::WARNING );
7311 goto captureProbe;
7312 }
7313
7314 // Get output channel information.
7315 unsigned int value;
7316 result = snd_pcm_hw_params_get_channels_max( params, &value );
7317 if ( result < 0 ) {
7318 snd_pcm_close( phandle );
7319 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7320 errorText_ = errorStream_.str();
7321 error( RtAudioError::WARNING );
7322 goto captureProbe;
7323 }
7324 info.outputChannels = value;
7325 snd_pcm_close( phandle );
7326
7327 captureProbe:
7328 stream = SND_PCM_STREAM_CAPTURE;
7329 snd_pcm_info_set_stream( pcminfo, stream );
7330
7331 // Now try for capture unless default device (with subdev = -1)
7332 if ( subdevice != -1 ) {
7333 result = snd_ctl_pcm_info( chandle, pcminfo );
7334 snd_ctl_close( chandle );
7335 if ( result < 0 ) {
7336 // Device probably doesn't support capture.
7337 if ( info.outputChannels == 0 ) return info;
7338 goto probeParameters;
7339 }
7340 }
7341 else
7342 snd_ctl_close( chandle );
7343
7344 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7345 if ( result < 0 ) {
7346 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7347 errorText_ = errorStream_.str();
7348 error( RtAudioError::WARNING );
7349 if ( info.outputChannels == 0 ) return info;
7350 goto probeParameters;
7351 }
7352
7353 // The device is open ... fill the parameter structure.
7354 result = snd_pcm_hw_params_any( phandle, params );
7355 if ( result < 0 ) {
7356 snd_pcm_close( phandle );
7357 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7358 errorText_ = errorStream_.str();
7359 error( RtAudioError::WARNING );
7360 if ( info.outputChannels == 0 ) return info;
7361 goto probeParameters;
7362 }
7363
7364 result = snd_pcm_hw_params_get_channels_max( params, &value );
7365 if ( result < 0 ) {
7366 snd_pcm_close( phandle );
7367 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7368 errorText_ = errorStream_.str();
7369 error( RtAudioError::WARNING );
7370 if ( info.outputChannels == 0 ) return info;
7371 goto probeParameters;
7372 }
7373 info.inputChannels = value;
7374 snd_pcm_close( phandle );
7375
7376 // If device opens for both playback and capture, we determine the channels.
7377 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7378 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7379
7380 // ALSA doesn't provide default devices so we'll use the first available one.
7381 if ( device == 0 && info.outputChannels > 0 )
7382 info.isDefaultOutput = true;
7383 if ( device == 0 && info.inputChannels > 0 )
7384 info.isDefaultInput = true;
7385
7386 probeParameters:
7387 // At this point, we just need to figure out the supported data
7388 // formats and sample rates. We'll proceed by opening the device in
7389 // the direction with the maximum number of channels, or playback if
7390 // they are equal. This might limit our sample rate options, but so
7391 // be it.
7392
7393 if ( info.outputChannels >= info.inputChannels )
7394 stream = SND_PCM_STREAM_PLAYBACK;
7395 else
7396 stream = SND_PCM_STREAM_CAPTURE;
7397 snd_pcm_info_set_stream( pcminfo, stream );
7398
7399 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7400 if ( result < 0 ) {
7401 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7402 errorText_ = errorStream_.str();
7403 error( RtAudioError::WARNING );
7404 return info;
7405 }
7406
7407 // The device is open ... fill the parameter structure.
7408 result = snd_pcm_hw_params_any( phandle, params );
7409 if ( result < 0 ) {
7410 snd_pcm_close( phandle );
7411 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7412 errorText_ = errorStream_.str();
7413 error( RtAudioError::WARNING );
7414 return info;
7415 }
7416
7417 // Test our discrete set of sample rate values.
7418 info.sampleRates.clear();
7419 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7420 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7421 info.sampleRates.push_back( SAMPLE_RATES[i] );
7422
7423 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7424 info.preferredSampleRate = SAMPLE_RATES[i];
7425 }
7426 }
7427 if ( info.sampleRates.size() == 0 ) {
7428 snd_pcm_close( phandle );
7429 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7430 errorText_ = errorStream_.str();
7431 error( RtAudioError::WARNING );
7432 return info;
7433 }
7434
7435 // Probe the supported data formats ... we don't care about endian-ness just yet
7436 snd_pcm_format_t format;
7437 info.nativeFormats = 0;
7438 format = SND_PCM_FORMAT_S8;
7439 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7440 info.nativeFormats |= RTAUDIO_SINT8;
7441 format = SND_PCM_FORMAT_S16;
7442 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7443 info.nativeFormats |= RTAUDIO_SINT16;
7444 format = SND_PCM_FORMAT_S24;
7445 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7446 info.nativeFormats |= RTAUDIO_SINT24;
7447 format = SND_PCM_FORMAT_S32;
7448 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7449 info.nativeFormats |= RTAUDIO_SINT32;
7450 format = SND_PCM_FORMAT_FLOAT;
7451 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7452 info.nativeFormats |= RTAUDIO_FLOAT32;
7453 format = SND_PCM_FORMAT_FLOAT64;
7454 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7455 info.nativeFormats |= RTAUDIO_FLOAT64;
7456
7457 // Check that we have at least one supported format
7458 if ( info.nativeFormats == 0 ) {
7459 snd_pcm_close( phandle );
7460 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7461 errorText_ = errorStream_.str();
7462 error( RtAudioError::WARNING );
7463 return info;
7464 }
7465
7466 // Get the device name
7467 char *cardname;
7468 result = snd_card_get_name( card, &cardname );
7469 if ( result >= 0 ) {
7470 sprintf( name, "hw:%s,%d", cardname, subdevice );
7471 free( cardname );
7472 }
7473 info.name = name;
7474
7475 // That's all ... close the device and return
7476 snd_pcm_close( phandle );
7477 info.probed = true;
7478 return info;
7479 }
7480
saveDeviceInfo(void)7481 void RtApiAlsa :: saveDeviceInfo( void )
7482 {
7483 devices_.clear();
7484
7485 unsigned int nDevices = getDeviceCount();
7486 devices_.resize( nDevices );
7487 for ( unsigned int i=0; i<nDevices; i++ )
7488 devices_[i] = getDeviceInfo( i );
7489 }
7490
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7491 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7492 unsigned int firstChannel, unsigned int sampleRate,
7493 RtAudioFormat format, unsigned int *bufferSize,
7494 RtAudio::StreamOptions *options )
7495
7496 {
7497 #if defined(__RTAUDIO_DEBUG__)
7498 snd_output_t *out;
7499 snd_output_stdio_attach(&out, stderr, 0);
7500 #endif
7501
7502 // I'm not using the "plug" interface ... too much inconsistent behavior.
7503
7504 unsigned nDevices = 0;
7505 int result, subdevice, card;
7506 char name[64];
7507 snd_ctl_t *chandle;
7508
7509 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7510 snprintf(name, sizeof(name), "%s", "default");
7511 else {
7512 // Count cards and devices
7513 card = -1;
7514 snd_card_next( &card );
7515 while ( card >= 0 ) {
7516 sprintf( name, "hw:%d", card );
7517 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7518 if ( result < 0 ) {
7519 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7520 errorText_ = errorStream_.str();
7521 return FAILURE;
7522 }
7523 subdevice = -1;
7524 while( 1 ) {
7525 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7526 if ( result < 0 ) break;
7527 if ( subdevice < 0 ) break;
7528 if ( nDevices == device ) {
7529 sprintf( name, "hw:%d,%d", card, subdevice );
7530 snd_ctl_close( chandle );
7531 goto foundDevice;
7532 }
7533 nDevices++;
7534 }
7535 snd_ctl_close( chandle );
7536 snd_card_next( &card );
7537 }
7538
7539 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7540 if ( result == 0 ) {
7541 if ( nDevices == device ) {
7542 strcpy( name, "default" );
7543 snd_ctl_close( chandle );
7544 goto foundDevice;
7545 }
7546 nDevices++;
7547 }
7548 snd_ctl_close( chandle );
7549
7550 if ( nDevices == 0 ) {
7551 // This should not happen because a check is made before this function is called.
7552 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7553 return FAILURE;
7554 }
7555
7556 if ( device >= nDevices ) {
7557 // This should not happen because a check is made before this function is called.
7558 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7559 return FAILURE;
7560 }
7561 }
7562
7563 foundDevice:
7564
7565 // The getDeviceInfo() function will not work for a device that is
7566 // already open. Thus, we'll probe the system before opening a
7567 // stream and save the results for use by getDeviceInfo().
7568 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7569 this->saveDeviceInfo();
7570
7571 snd_pcm_stream_t stream;
7572 if ( mode == OUTPUT )
7573 stream = SND_PCM_STREAM_PLAYBACK;
7574 else
7575 stream = SND_PCM_STREAM_CAPTURE;
7576
7577 snd_pcm_t *phandle;
7578 int openMode = SND_PCM_ASYNC;
7579 result = snd_pcm_open( &phandle, name, stream, openMode );
7580 if ( result < 0 ) {
7581 if ( mode == OUTPUT )
7582 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7583 else
7584 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7585 errorText_ = errorStream_.str();
7586 return FAILURE;
7587 }
7588
7589 // Fill the parameter structure.
7590 snd_pcm_hw_params_t *hw_params;
7591 snd_pcm_hw_params_alloca( &hw_params );
7592 result = snd_pcm_hw_params_any( phandle, hw_params );
7593 if ( result < 0 ) {
7594 snd_pcm_close( phandle );
7595 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7596 errorText_ = errorStream_.str();
7597 return FAILURE;
7598 }
7599
7600 #if defined(__RTAUDIO_DEBUG__)
7601 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7602 snd_pcm_hw_params_dump( hw_params, out );
7603 #endif
7604
7605 // Set access ... check user preference.
7606 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7607 stream_.userInterleaved = false;
7608 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7609 if ( result < 0 ) {
7610 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7611 stream_.deviceInterleaved[mode] = true;
7612 }
7613 else
7614 stream_.deviceInterleaved[mode] = false;
7615 }
7616 else {
7617 stream_.userInterleaved = true;
7618 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7619 if ( result < 0 ) {
7620 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7621 stream_.deviceInterleaved[mode] = false;
7622 }
7623 else
7624 stream_.deviceInterleaved[mode] = true;
7625 }
7626
7627 if ( result < 0 ) {
7628 snd_pcm_close( phandle );
7629 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7630 errorText_ = errorStream_.str();
7631 return FAILURE;
7632 }
7633
7634 // Determine how to set the device format.
7635 stream_.userFormat = format;
7636 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7637
7638 if ( format == RTAUDIO_SINT8 )
7639 deviceFormat = SND_PCM_FORMAT_S8;
7640 else if ( format == RTAUDIO_SINT16 )
7641 deviceFormat = SND_PCM_FORMAT_S16;
7642 else if ( format == RTAUDIO_SINT24 )
7643 deviceFormat = SND_PCM_FORMAT_S24;
7644 else if ( format == RTAUDIO_SINT32 )
7645 deviceFormat = SND_PCM_FORMAT_S32;
7646 else if ( format == RTAUDIO_FLOAT32 )
7647 deviceFormat = SND_PCM_FORMAT_FLOAT;
7648 else if ( format == RTAUDIO_FLOAT64 )
7649 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7650
7651 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7652 stream_.deviceFormat[mode] = format;
7653 goto setFormat;
7654 }
7655
7656 // The user requested format is not natively supported by the device.
7657 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7658 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7659 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7660 goto setFormat;
7661 }
7662
7663 deviceFormat = SND_PCM_FORMAT_FLOAT;
7664 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7665 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7666 goto setFormat;
7667 }
7668
7669 deviceFormat = SND_PCM_FORMAT_S32;
7670 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7671 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7672 goto setFormat;
7673 }
7674
7675 deviceFormat = SND_PCM_FORMAT_S24;
7676 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7677 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7678 goto setFormat;
7679 }
7680
7681 deviceFormat = SND_PCM_FORMAT_S16;
7682 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7683 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7684 goto setFormat;
7685 }
7686
7687 deviceFormat = SND_PCM_FORMAT_S8;
7688 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7689 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7690 goto setFormat;
7691 }
7692
7693 // If we get here, no supported format was found.
7694 snd_pcm_close( phandle );
7695 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7696 errorText_ = errorStream_.str();
7697 return FAILURE;
7698
7699 setFormat:
7700 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7701 if ( result < 0 ) {
7702 snd_pcm_close( phandle );
7703 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7704 errorText_ = errorStream_.str();
7705 return FAILURE;
7706 }
7707
  // Determine whether byte-swapping is necessary.
7709 stream_.doByteSwap[mode] = false;
7710 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7711 result = snd_pcm_format_cpu_endian( deviceFormat );
7712 if ( result == 0 )
7713 stream_.doByteSwap[mode] = true;
7714 else if (result < 0) {
7715 snd_pcm_close( phandle );
7716 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7717 errorText_ = errorStream_.str();
7718 return FAILURE;
7719 }
7720 }
7721
7722 // Set the sample rate.
7723 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7724 if ( result < 0 ) {
7725 snd_pcm_close( phandle );
7726 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7727 errorText_ = errorStream_.str();
7728 return FAILURE;
7729 }
7730
7731 // Determine the number of channels for this device. We support a possible
7732 // minimum device channel number > than the value requested by the user.
7733 stream_.nUserChannels[mode] = channels;
7734 unsigned int value;
7735 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7736 unsigned int deviceChannels = value;
7737 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7738 snd_pcm_close( phandle );
7739 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7740 errorText_ = errorStream_.str();
7741 return FAILURE;
7742 }
7743
7744 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7745 if ( result < 0 ) {
7746 snd_pcm_close( phandle );
7747 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7748 errorText_ = errorStream_.str();
7749 return FAILURE;
7750 }
7751 deviceChannels = value;
7752 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7753 stream_.nDeviceChannels[mode] = deviceChannels;
7754
7755 // Set the device channels.
7756 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7757 if ( result < 0 ) {
7758 snd_pcm_close( phandle );
7759 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7760 errorText_ = errorStream_.str();
7761 return FAILURE;
7762 }
7763
7764 // Set the buffer (or period) size.
7765 int dir = 0;
7766 snd_pcm_uframes_t periodSize = *bufferSize;
7767 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7768 if ( result < 0 ) {
7769 snd_pcm_close( phandle );
7770 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7771 errorText_ = errorStream_.str();
7772 return FAILURE;
7773 }
7774 *bufferSize = periodSize;
7775
7776 // Set the buffer number, which in ALSA is referred to as the "period".
7777 unsigned int periods = 0;
7778 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7779 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7780 if ( periods < 2 ) periods = 4; // a fairly safe default value
7781 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7782 if ( result < 0 ) {
7783 snd_pcm_close( phandle );
7784 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7785 errorText_ = errorStream_.str();
7786 return FAILURE;
7787 }
7788
7789 // If attempting to setup a duplex stream, the bufferSize parameter
7790 // MUST be the same in both directions!
7791 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7792 snd_pcm_close( phandle );
7793 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7794 errorText_ = errorStream_.str();
7795 return FAILURE;
7796 }
7797
7798 stream_.bufferSize = *bufferSize;
7799
7800 // Install the hardware configuration
7801 result = snd_pcm_hw_params( phandle, hw_params );
7802 if ( result < 0 ) {
7803 snd_pcm_close( phandle );
7804 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7805 errorText_ = errorStream_.str();
7806 return FAILURE;
7807 }
7808
7809 #if defined(__RTAUDIO_DEBUG__)
7810 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7811 snd_pcm_hw_params_dump( hw_params, out );
7812 #endif
7813
7814 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7815 snd_pcm_sw_params_t *sw_params = NULL;
7816 snd_pcm_sw_params_alloca( &sw_params );
7817 snd_pcm_sw_params_current( phandle, sw_params );
7818 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7819 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7820 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7821
7822 // The following two settings were suggested by Theo Veenker
7823 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7824 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7825
7826 // here are two options for a fix
7827 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7828 snd_pcm_uframes_t val;
7829 snd_pcm_sw_params_get_boundary( sw_params, &val );
7830 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7831
7832 result = snd_pcm_sw_params( phandle, sw_params );
7833 if ( result < 0 ) {
7834 snd_pcm_close( phandle );
7835 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7836 errorText_ = errorStream_.str();
7837 return FAILURE;
7838 }
7839
7840 #if defined(__RTAUDIO_DEBUG__)
7841 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7842 snd_pcm_sw_params_dump( sw_params, out );
7843 #endif
7844
7845 // Set flags for buffer conversion
7846 stream_.doConvertBuffer[mode] = false;
7847 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7848 stream_.doConvertBuffer[mode] = true;
7849 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7850 stream_.doConvertBuffer[mode] = true;
7851 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7852 stream_.nUserChannels[mode] > 1 )
7853 stream_.doConvertBuffer[mode] = true;
7854
7855 // Allocate the ApiHandle if necessary and then save.
7856 AlsaHandle *apiInfo = 0;
7857 if ( stream_.apiHandle == 0 ) {
7858 try {
7859 apiInfo = (AlsaHandle *) new AlsaHandle;
7860 }
7861 catch ( std::bad_alloc& ) {
7862 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7863 goto error;
7864 }
7865
7866 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7867 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7868 goto error;
7869 }
7870
7871 stream_.apiHandle = (void *) apiInfo;
7872 apiInfo->handles[0] = 0;
7873 apiInfo->handles[1] = 0;
7874 }
7875 else {
7876 apiInfo = (AlsaHandle *) stream_.apiHandle;
7877 }
7878 apiInfo->handles[mode] = phandle;
7879 phandle = 0;
7880
7881 // Allocate necessary internal buffers.
7882 unsigned long bufferBytes;
7883 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7884 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7885 if ( stream_.userBuffer[mode] == NULL ) {
7886 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7887 goto error;
7888 }
7889
7890 if ( stream_.doConvertBuffer[mode] ) {
7891
7892 bool makeBuffer = true;
7893 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7894 if ( mode == INPUT ) {
7895 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7896 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7897 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7898 }
7899 }
7900
7901 if ( makeBuffer ) {
7902 bufferBytes *= *bufferSize;
7903 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7904 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7905 if ( stream_.deviceBuffer == NULL ) {
7906 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7907 goto error;
7908 }
7909 }
7910 }
7911
7912 stream_.sampleRate = sampleRate;
7913 stream_.nBuffers = periods;
7914 stream_.device[mode] = device;
7915 stream_.state = STREAM_STOPPED;
7916
7917 // Setup the buffer conversion information structure.
7918 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7919
7920 // Setup thread if necessary.
7921 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7922 // We had already set up an output stream.
7923 stream_.mode = DUPLEX;
7924 // Link the streams if possible.
7925 apiInfo->synchronized = false;
7926 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7927 apiInfo->synchronized = true;
7928 else {
7929 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7930 error( RtAudioError::WARNING );
7931 }
7932 }
7933 else {
7934 stream_.mode = mode;
7935
7936 // Setup callback thread.
7937 stream_.callbackInfo.object = (void *) this;
7938
7939 // Set the thread attributes for joinable and realtime scheduling
7940 // priority (optional). The higher priority will only take affect
7941 // if the program is run as root or suid. Note, under Linux
7942 // processes with CAP_SYS_NICE privilege, a user can change
7943 // scheduling policy and priority (thus need not be root). See
7944 // POSIX "capabilities".
7945 pthread_attr_t attr;
7946 pthread_attr_init( &attr );
7947 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7948 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7949 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7950 stream_.callbackInfo.doRealtime = true;
7951 struct sched_param param;
7952 int priority = options->priority;
7953 int min = sched_get_priority_min( SCHED_RR );
7954 int max = sched_get_priority_max( SCHED_RR );
7955 if ( priority < min ) priority = min;
7956 else if ( priority > max ) priority = max;
7957 param.sched_priority = priority;
7958
7959 // Set the policy BEFORE the priority. Otherwise it fails.
7960 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7961 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7962 // This is definitely required. Otherwise it fails.
7963 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7964 pthread_attr_setschedparam(&attr, ¶m);
7965 }
7966 else
7967 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7968 #else
7969 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7970 #endif
7971
7972 stream_.callbackInfo.isRunning = true;
7973 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7974 pthread_attr_destroy( &attr );
7975 if ( result ) {
7976 // Failed. Try instead with default attributes.
7977 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7978 if ( result ) {
7979 stream_.callbackInfo.isRunning = false;
7980 errorText_ = "RtApiAlsa::error creating callback thread!";
7981 goto error;
7982 }
7983 }
7984 }
7985
7986 return SUCCESS;
7987
7988 error:
7989 if ( apiInfo ) {
7990 pthread_cond_destroy( &apiInfo->runnable_cv );
7991 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7992 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7993 delete apiInfo;
7994 stream_.apiHandle = 0;
7995 }
7996
7997 if ( phandle) snd_pcm_close( phandle );
7998
7999 for ( int i=0; i<2; i++ ) {
8000 if ( stream_.userBuffer[i] ) {
8001 free( stream_.userBuffer[i] );
8002 stream_.userBuffer[i] = 0;
8003 }
8004 }
8005
8006 if ( stream_.deviceBuffer ) {
8007 free( stream_.deviceBuffer );
8008 stream_.deviceBuffer = 0;
8009 }
8010
8011 stream_.state = STREAM_CLOSED;
8012 return FAILURE;
8013 }
8014
void RtApiAlsa :: closeStream()
{
  // Tears down an open stream: stops the callback thread, aborts any
  // in-progress i/o, closes the pcm handles and frees all buffers.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false;
  // If the callback thread is parked waiting on the runnable condition
  // variable, wake it so it can observe isRunning == false and exit.
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // Abort any in-progress playback/capture before closing the handles.
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );  // handles[0] = playback
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );  // handles[1] = capture
  }

  // Release the API handle and both pcm devices.
  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Free the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the shared device-side conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
8064
startStream()8065 void RtApiAlsa :: startStream()
8066 {
8067 // This method calls snd_pcm_prepare if the device isn't already in that state.
8068
8069 verifyStream();
8070 if ( stream_.state == STREAM_RUNNING ) {
8071 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8072 error( RtAudioError::WARNING );
8073 return;
8074 }
8075
8076 MUTEX_LOCK( &stream_.mutex );
8077
8078 #if defined( HAVE_GETTIMEOFDAY )
8079 gettimeofday( &stream_.lastTickTimestamp, NULL );
8080 #endif
8081
8082 int result = 0;
8083 snd_pcm_state_t state;
8084 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8085 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8086 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8087 state = snd_pcm_state( handle[0] );
8088 if ( state != SND_PCM_STATE_PREPARED ) {
8089 result = snd_pcm_prepare( handle[0] );
8090 if ( result < 0 ) {
8091 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8092 errorText_ = errorStream_.str();
8093 goto unlock;
8094 }
8095 }
8096 }
8097
8098 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8099 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8100 state = snd_pcm_state( handle[1] );
8101 if ( state != SND_PCM_STATE_PREPARED ) {
8102 result = snd_pcm_prepare( handle[1] );
8103 if ( result < 0 ) {
8104 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8105 errorText_ = errorStream_.str();
8106 goto unlock;
8107 }
8108 }
8109 }
8110
8111 stream_.state = STREAM_RUNNING;
8112
8113 unlock:
8114 apiInfo->runnable = true;
8115 pthread_cond_signal( &apiInfo->runnable_cv );
8116 MUTEX_UNLOCK( &stream_.mutex );
8117
8118 if ( result >= 0 ) return;
8119 error( RtAudioError::SYSTEM_ERROR );
8120 }
8121
stopStream()8122 void RtApiAlsa :: stopStream()
8123 {
8124 verifyStream();
8125 if ( stream_.state == STREAM_STOPPED ) {
8126 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8127 error( RtAudioError::WARNING );
8128 return;
8129 }
8130
8131 stream_.state = STREAM_STOPPED;
8132 MUTEX_LOCK( &stream_.mutex );
8133
8134 int result = 0;
8135 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8136 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8137 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8138 if ( apiInfo->synchronized )
8139 result = snd_pcm_drop( handle[0] );
8140 else
8141 result = snd_pcm_drain( handle[0] );
8142 if ( result < 0 ) {
8143 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8144 errorText_ = errorStream_.str();
8145 goto unlock;
8146 }
8147 }
8148
8149 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8150 result = snd_pcm_drop( handle[1] );
8151 if ( result < 0 ) {
8152 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8153 errorText_ = errorStream_.str();
8154 goto unlock;
8155 }
8156 }
8157
8158 unlock:
8159 apiInfo->runnable = false; // fixes high CPU usage when stopped
8160 MUTEX_UNLOCK( &stream_.mutex );
8161
8162 if ( result >= 0 ) return;
8163 error( RtAudioError::SYSTEM_ERROR );
8164 }
8165
abortStream()8166 void RtApiAlsa :: abortStream()
8167 {
8168 verifyStream();
8169 if ( stream_.state == STREAM_STOPPED ) {
8170 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8171 error( RtAudioError::WARNING );
8172 return;
8173 }
8174
8175 stream_.state = STREAM_STOPPED;
8176 MUTEX_LOCK( &stream_.mutex );
8177
8178 int result = 0;
8179 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8180 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8181 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8182 result = snd_pcm_drop( handle[0] );
8183 if ( result < 0 ) {
8184 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8185 errorText_ = errorStream_.str();
8186 goto unlock;
8187 }
8188 }
8189
8190 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8191 result = snd_pcm_drop( handle[1] );
8192 if ( result < 0 ) {
8193 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8194 errorText_ = errorStream_.str();
8195 goto unlock;
8196 }
8197 }
8198
8199 unlock:
8200 apiInfo->runnable = false; // fixes high CPU usage when stopped
8201 MUTEX_UNLOCK( &stream_.mutex );
8202
8203 if ( result >= 0 ) return;
8204 error( RtAudioError::SYSTEM_ERROR );
8205 }
8206
void RtApiAlsa :: callbackEvent()
{
  // Performs one i/o cycle for the stream: waits while stopped, invokes the
  // user callback, then reads from and/or writes to the pcm device(s),
  // handling xruns (over/underruns) by re-preparing the affected device.
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park here until startStream() (or closeStream()) signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report any xrun that occurred since the last cycle to the user callback.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when a format/channel
    // conversion is required, otherwise directly into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occurred.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next cycle and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Still attempt the output half of a duplex stream.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occurred.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next cycle and re-prepare the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
8394
alsaCallbackHandler(void * ptr)8395 static void *alsaCallbackHandler( void *ptr )
8396 {
8397 CallbackInfo *info = (CallbackInfo *) ptr;
8398 RtApiAlsa *object = (RtApiAlsa *) info->object;
8399 bool *isRunning = &info->isRunning;
8400
8401 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8402 if ( info->doRealtime ) {
8403 std::cerr << "RtAudio alsa: " <<
8404 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8405 "running realtime scheduling" << std::endl;
8406 }
8407 #endif
8408
8409 while ( *isRunning == true ) {
8410 pthread_testcancel();
8411 object->callbackEvent();
8412 }
8413
8414 pthread_exit( NULL );
8415 }
8416
8417 //******************** End of __LINUX_ALSA__ *********************//
8418 #endif
8419
8420 #if defined(__LINUX_PULSE__)
8421
8422 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8423 // and Tristan Matthews.
8424
8425 #include <pulse/error.h>
8426 #include <pulse/simple.h>
8427 #include <cstdio>
8428
// Sample rates advertised by the PulseAudio backend; zero-terminated list.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8431
// Associates an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};
8436
// Sample formats the PulseAudio backend supports, terminated by a
// {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8442
// Per-stream state for the PulseAudio backend: one simple-API connection
// per direction plus the callback thread and its start/stop handshake.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (0 when output unused)
  pa_simple *s_rec;           // capture connection (0 when input unused)
  pthread_t thread;           // callback thread
  pthread_cond_t runnable_cv; // signaled when the stream may run
                              // NOTE(review): not initialized here — presumably
                              // pthread_cond_init is called at stream open; confirm.
  bool runnable;              // guarded by the stream mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8451
~RtApiPulse()8452 RtApiPulse::~RtApiPulse()
8453 {
8454 if ( stream_.state != STREAM_CLOSED )
8455 closeStream();
8456 }
8457
unsigned int RtApiPulse::getDeviceCount( void )
{
  // The PulseAudio backend exposes a single logical device (the server's
  // default source/sink), so the count is always one.
  return 1;
}
8462
getDeviceInfo(unsigned int)8463 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8464 {
8465 RtAudio::DeviceInfo info;
8466 info.probed = true;
8467 info.name = "PulseAudio";
8468 info.outputChannels = 2;
8469 info.inputChannels = 2;
8470 info.duplexChannels = 2;
8471 info.isDefaultOutput = true;
8472 info.isDefaultInput = true;
8473
8474 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8475 info.sampleRates.push_back( *sr );
8476
8477 info.preferredSampleRate = 48000;
8478 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8479
8480 return info;
8481 }
8482
pulseaudio_callback(void * user)8483 static void *pulseaudio_callback( void * user )
8484 {
8485 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8486 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8487 volatile bool *isRunning = &cbi->isRunning;
8488
8489 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8490 if (cbi->doRealtime) {
8491 std::cerr << "RtAudio pulse: " <<
8492 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8493 "running realtime scheduling" << std::endl;
8494 }
8495 #endif
8496
8497 while ( *isRunning ) {
8498 pthread_testcancel();
8499 context->callbackEvent();
8500 }
8501
8502 pthread_exit( NULL );
8503 }
8504
closeStream(void)8505 void RtApiPulse::closeStream( void )
8506 {
8507 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8508
8509 stream_.callbackInfo.isRunning = false;
8510 if ( pah ) {
8511 MUTEX_LOCK( &stream_.mutex );
8512 if ( stream_.state == STREAM_STOPPED ) {
8513 pah->runnable = true;
8514 pthread_cond_signal( &pah->runnable_cv );
8515 }
8516 MUTEX_UNLOCK( &stream_.mutex );
8517
8518 pthread_join( pah->thread, 0 );
8519 if ( pah->s_play ) {
8520 pa_simple_flush( pah->s_play, NULL );
8521 pa_simple_free( pah->s_play );
8522 }
8523 if ( pah->s_rec )
8524 pa_simple_free( pah->s_rec );
8525
8526 pthread_cond_destroy( &pah->runnable_cv );
8527 delete pah;
8528 stream_.apiHandle = 0;
8529 }
8530
8531 if ( stream_.userBuffer[0] ) {
8532 free( stream_.userBuffer[0] );
8533 stream_.userBuffer[0] = 0;
8534 }
8535 if ( stream_.userBuffer[1] ) {
8536 free( stream_.userBuffer[1] );
8537 stream_.userBuffer[1] = 0;
8538 }
8539
8540 stream_.state = STREAM_CLOSED;
8541 stream_.mode = UNINITIALIZED;
8542 }
8543
void RtApiPulse::callbackEvent( void )
{
  // Performs one i/o cycle: waits while stopped, invokes the user callback,
  // then pushes/pulls one buffer of audio through the PulseAudio simple API.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Park here until startStream() (or closeStream()) signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0; // this backend never reports over/underflow
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Transfer via the device buffer when a format/channel conversion is
  // required, otherwise directly from/to the user buffers.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );
        bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
        bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.userFormat );

    // Blocking write of one buffer of playback data.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer of capture data.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  if ( doStopStream == 1 )
    stopStream();
}
8635
startStream(void)8636 void RtApiPulse::startStream( void )
8637 {
8638 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8639
8640 if ( stream_.state == STREAM_CLOSED ) {
8641 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8642 error( RtAudioError::INVALID_USE );
8643 return;
8644 }
8645 if ( stream_.state == STREAM_RUNNING ) {
8646 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8647 error( RtAudioError::WARNING );
8648 return;
8649 }
8650
8651 MUTEX_LOCK( &stream_.mutex );
8652
8653 #if defined( HAVE_GETTIMEOFDAY )
8654 gettimeofday( &stream_.lastTickTimestamp, NULL );
8655 #endif
8656
8657 stream_.state = STREAM_RUNNING;
8658
8659 pah->runnable = true;
8660 pthread_cond_signal( &pah->runnable_cv );
8661 MUTEX_UNLOCK( &stream_.mutex );
8662 }
8663
stopStream(void)8664 void RtApiPulse::stopStream( void )
8665 {
8666 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8667
8668 if ( stream_.state == STREAM_CLOSED ) {
8669 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8670 error( RtAudioError::INVALID_USE );
8671 return;
8672 }
8673 if ( stream_.state == STREAM_STOPPED ) {
8674 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8675 error( RtAudioError::WARNING );
8676 return;
8677 }
8678
8679 stream_.state = STREAM_STOPPED;
8680 MUTEX_LOCK( &stream_.mutex );
8681
8682 if ( pah && pah->s_play ) {
8683 int pa_error;
8684 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8685 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8686 pa_strerror( pa_error ) << ".";
8687 errorText_ = errorStream_.str();
8688 MUTEX_UNLOCK( &stream_.mutex );
8689 error( RtAudioError::SYSTEM_ERROR );
8690 return;
8691 }
8692 }
8693
8694 stream_.state = STREAM_STOPPED;
8695 MUTEX_UNLOCK( &stream_.mutex );
8696 }
8697
abortStream(void)8698 void RtApiPulse::abortStream( void )
8699 {
8700 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8701
8702 if ( stream_.state == STREAM_CLOSED ) {
8703 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8704 error( RtAudioError::INVALID_USE );
8705 return;
8706 }
8707 if ( stream_.state == STREAM_STOPPED ) {
8708 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8709 error( RtAudioError::WARNING );
8710 return;
8711 }
8712
8713 stream_.state = STREAM_STOPPED;
8714 MUTEX_LOCK( &stream_.mutex );
8715
8716 if ( pah && pah->s_play ) {
8717 int pa_error;
8718 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8719 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8720 pa_strerror( pa_error ) << ".";
8721 errorText_ = errorStream_.str();
8722 MUTEX_UNLOCK( &stream_.mutex );
8723 error( RtAudioError::SYSTEM_ERROR );
8724 return;
8725 }
8726 }
8727
8728 stream_.state = STREAM_STOPPED;
8729 MUTEX_UNLOCK( &stream_.mutex );
8730 }
8731
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)8732 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8733 unsigned int channels, unsigned int firstChannel,
8734 unsigned int sampleRate, RtAudioFormat format,
8735 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8736 {
8737 PulseAudioHandle *pah = 0;
8738 unsigned long bufferBytes = 0;
8739 pa_sample_spec ss;
8740
8741 if ( device != 0 ) return false;
8742 if ( mode != INPUT && mode != OUTPUT ) return false;
8743 if ( channels != 1 && channels != 2 ) {
8744 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8745 return false;
8746 }
8747 ss.channels = channels;
8748
8749 if ( firstChannel != 0 ) return false;
8750
8751 bool sr_found = false;
8752 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8753 if ( sampleRate == *sr ) {
8754 sr_found = true;
8755 stream_.sampleRate = sampleRate;
8756 ss.rate = sampleRate;
8757 break;
8758 }
8759 }
8760 if ( !sr_found ) {
8761 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8762 return false;
8763 }
8764
8765 bool sf_found = 0;
8766 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8767 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8768 if ( format == sf->rtaudio_format ) {
8769 sf_found = true;
8770 stream_.userFormat = sf->rtaudio_format;
8771 stream_.deviceFormat[mode] = stream_.userFormat;
8772 ss.format = sf->pa_format;
8773 break;
8774 }
8775 }
8776 if ( !sf_found ) { // Use internal data format conversion.
8777 stream_.userFormat = format;
8778 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8779 ss.format = PA_SAMPLE_FLOAT32LE;
8780 }
8781
8782 // Set other stream parameters.
8783 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8784 else stream_.userInterleaved = true;
8785 stream_.deviceInterleaved[mode] = true;
8786 stream_.nBuffers = 1;
8787 stream_.doByteSwap[mode] = false;
8788 stream_.nUserChannels[mode] = channels;
8789 stream_.nDeviceChannels[mode] = channels + firstChannel;
8790 stream_.channelOffset[mode] = 0;
8791 std::string streamName = "RtAudio";
8792
8793 // Set flags for buffer conversion.
8794 stream_.doConvertBuffer[mode] = false;
8795 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8796 stream_.doConvertBuffer[mode] = true;
8797 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8798 stream_.doConvertBuffer[mode] = true;
8799
8800 // Allocate necessary internal buffers.
8801 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8802 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8803 if ( stream_.userBuffer[mode] == NULL ) {
8804 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8805 goto error;
8806 }
8807 stream_.bufferSize = *bufferSize;
8808
8809 if ( stream_.doConvertBuffer[mode] ) {
8810
8811 bool makeBuffer = true;
8812 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8813 if ( mode == INPUT ) {
8814 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8815 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8816 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8817 }
8818 }
8819
8820 if ( makeBuffer ) {
8821 bufferBytes *= *bufferSize;
8822 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8823 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8824 if ( stream_.deviceBuffer == NULL ) {
8825 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8826 goto error;
8827 }
8828 }
8829 }
8830
8831 stream_.device[mode] = device;
8832
8833 // Setup the buffer conversion information structure.
8834 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8835
8836 if ( !stream_.apiHandle ) {
8837 PulseAudioHandle *pah = new PulseAudioHandle;
8838 if ( !pah ) {
8839 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8840 goto error;
8841 }
8842
8843 stream_.apiHandle = pah;
8844 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8845 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8846 goto error;
8847 }
8848 }
8849 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8850
8851 int error;
8852 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8853 switch ( mode ) {
8854 case INPUT:
8855 pa_buffer_attr buffer_attr;
8856 buffer_attr.fragsize = bufferBytes;
8857 buffer_attr.maxlength = -1;
8858
8859 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8860 if ( !pah->s_rec ) {
8861 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8862 goto error;
8863 }
8864 break;
8865 case OUTPUT:
8866 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8867 if ( !pah->s_play ) {
8868 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8869 goto error;
8870 }
8871 break;
8872 default:
8873 goto error;
8874 }
8875
8876 if ( stream_.mode == UNINITIALIZED )
8877 stream_.mode = mode;
8878 else if ( stream_.mode == mode )
8879 goto error;
8880 else
8881 stream_.mode = DUPLEX;
8882
8883 if ( !stream_.callbackInfo.isRunning ) {
8884 stream_.callbackInfo.object = this;
8885
8886 stream_.state = STREAM_STOPPED;
8887 // Set the thread attributes for joinable and realtime scheduling
8888 // priority (optional). The higher priority will only take affect
8889 // if the program is run as root or suid. Note, under Linux
8890 // processes with CAP_SYS_NICE privilege, a user can change
8891 // scheduling policy and priority (thus need not be root). See
8892 // POSIX "capabilities".
8893 pthread_attr_t attr;
8894 pthread_attr_init( &attr );
8895 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8896 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8897 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8898 stream_.callbackInfo.doRealtime = true;
8899 struct sched_param param;
8900 int priority = options->priority;
8901 int min = sched_get_priority_min( SCHED_RR );
8902 int max = sched_get_priority_max( SCHED_RR );
8903 if ( priority < min ) priority = min;
8904 else if ( priority > max ) priority = max;
8905 param.sched_priority = priority;
8906
8907 // Set the policy BEFORE the priority. Otherwise it fails.
8908 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8909 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8910 // This is definitely required. Otherwise it fails.
8911 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8912 pthread_attr_setschedparam(&attr, ¶m);
8913 }
8914 else
8915 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8916 #else
8917 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8918 #endif
8919
8920 stream_.callbackInfo.isRunning = true;
8921 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8922 pthread_attr_destroy(&attr);
8923 if(result != 0) {
8924 // Failed. Try instead with default attributes.
8925 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8926 if(result != 0) {
8927 stream_.callbackInfo.isRunning = false;
8928 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8929 goto error;
8930 }
8931 }
8932 }
8933
8934 return SUCCESS;
8935
8936 error:
8937 if ( pah && stream_.callbackInfo.isRunning ) {
8938 pthread_cond_destroy( &pah->runnable_cv );
8939 delete pah;
8940 stream_.apiHandle = 0;
8941 }
8942
8943 for ( int i=0; i<2; i++ ) {
8944 if ( stream_.userBuffer[i] ) {
8945 free( stream_.userBuffer[i] );
8946 stream_.userBuffer[i] = 0;
8947 }
8948 }
8949
8950 if ( stream_.deviceBuffer ) {
8951 free( stream_.deviceBuffer );
8952 stream_.deviceBuffer = 0;
8953 }
8954
8955 stream_.state = STREAM_CLOSED;
8956 return FAILURE;
8957 }
8958
8959 //******************** End of __LINUX_PULSE__ *********************//
8960 #endif
8961
8962 #if defined(__LINUX_OSS__)
8963
8964 #include <unistd.h>
8965 #include <sys/ioctl.h>
8966 #include <unistd.h>
8967 #include <fcntl.h>
8968 #include <sys/soundcard.h>
8969 #include <errno.h>
8970 #include <math.h>
8971
8972 static void *ossCallbackHandler(void * ptr);
8973
8974 // A structure to hold various information related to the OSS API
8975 // implementation.
// Per-stream bookkeeping for the OSS API implementation: one device
// descriptor per direction, over/underrun flags, a trigger flag, and the
// condition variable used to park the callback thread while stopped.
struct OssHandle {
  int id[2];            // device ids
  bool xrun[2];
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    : id(), xrun(), triggered( false ) {}  // value-init zeroes id[] and xrun[]
};
8985
// Default constructor: all stream state is initialized by the RtApi base
// class; no OSS-specific setup is needed until a stream is opened.
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
8990
~RtApiOss()8991 RtApiOss :: ~RtApiOss()
8992 {
8993 if ( stream_.state != STREAM_CLOSED ) closeStream();
8994 }
8995
getDeviceCount(void)8996 unsigned int RtApiOss :: getDeviceCount( void )
8997 {
8998 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8999 if ( mixerfd == -1 ) {
9000 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9001 error( RtAudioError::WARNING );
9002 return 0;
9003 }
9004
9005 oss_sysinfo sysinfo;
9006 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9007 close( mixerfd );
9008 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9009 error( RtAudioError::WARNING );
9010 return 0;
9011 }
9012
9013 close( mixerfd );
9014 return sysinfo.numaudios;
9015 }
9016
// Probe one OSS device (by index) via the mixer ioctls and fill in an
// RtAudio::DeviceInfo: channel counts, native data formats, and the
// supported sample rates.  info.probed is true only on full success.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;  // stays false on every failure path below

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
    return info;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
    return info;
  }

  // Validate the requested device index against the reported count.
  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Query the per-device capabilities; the mixer fd is no longer needed.
  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe channels
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // Duplex channel count is the smaller of the two directions.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  }

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
#ifdef AFMT_FLOAT
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
#endif
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The driver reports an explicit list of rates; keep those that
    // intersect RtAudio's SAMPLE_RATES table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );

          // Prefer the highest supported rate not exceeding 48 kHz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];

          break;
        }
      }
    }
  }
  else {
    // Check min and max rate values;
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        // Prefer the highest supported rate not exceeding 48 kHz.
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }
  else {
    info.probed = true;
    info.name = ainfo.name;
  }

  return info;
}
9135
9136
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)9137 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9138 unsigned int firstChannel, unsigned int sampleRate,
9139 RtAudioFormat format, unsigned int *bufferSize,
9140 RtAudio::StreamOptions *options )
9141 {
9142 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9143 if ( mixerfd == -1 ) {
9144 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9145 return FAILURE;
9146 }
9147
9148 oss_sysinfo sysinfo;
9149 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9150 if ( result == -1 ) {
9151 close( mixerfd );
9152 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9153 return FAILURE;
9154 }
9155
9156 unsigned nDevices = sysinfo.numaudios;
9157 if ( nDevices == 0 ) {
9158 // This should not happen because a check is made before this function is called.
9159 close( mixerfd );
9160 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9161 return FAILURE;
9162 }
9163
9164 if ( device >= nDevices ) {
9165 // This should not happen because a check is made before this function is called.
9166 close( mixerfd );
9167 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9168 return FAILURE;
9169 }
9170
9171 oss_audioinfo ainfo;
9172 ainfo.dev = device;
9173 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9174 close( mixerfd );
9175 if ( result == -1 ) {
9176 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9177 errorText_ = errorStream_.str();
9178 return FAILURE;
9179 }
9180
9181 // Check if device supports input or output
9182 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9183 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9184 if ( mode == OUTPUT )
9185 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9186 else
9187 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9188 errorText_ = errorStream_.str();
9189 return FAILURE;
9190 }
9191
9192 int flags = 0;
9193 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9194 if ( mode == OUTPUT )
9195 flags |= O_WRONLY;
9196 else { // mode == INPUT
9197 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9198 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9199 close( handle->id[0] );
9200 handle->id[0] = 0;
9201 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9202 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9203 errorText_ = errorStream_.str();
9204 return FAILURE;
9205 }
9206 // Check that the number previously set channels is the same.
9207 if ( stream_.nUserChannels[0] != channels ) {
9208 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9209 errorText_ = errorStream_.str();
9210 return FAILURE;
9211 }
9212 flags |= O_RDWR;
9213 }
9214 else
9215 flags |= O_RDONLY;
9216 }
9217
9218 // Set exclusive access if specified.
9219 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9220
9221 // Try to open the device.
9222 int fd;
9223 fd = open( ainfo.devnode, flags, 0 );
9224 if ( fd == -1 ) {
9225 if ( errno == EBUSY )
9226 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9227 else
9228 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9229 errorText_ = errorStream_.str();
9230 return FAILURE;
9231 }
9232
9233 // For duplex operation, specifically set this mode (this doesn't seem to work).
9234 /*
9235 if ( flags | O_RDWR ) {
9236 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9237 if ( result == -1) {
9238 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9239 errorText_ = errorStream_.str();
9240 return FAILURE;
9241 }
9242 }
9243 */
9244
9245 // Check the device channel support.
9246 stream_.nUserChannels[mode] = channels;
9247 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9248 close( fd );
9249 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9250 errorText_ = errorStream_.str();
9251 return FAILURE;
9252 }
9253
9254 // Set the number of channels.
9255 int deviceChannels = channels + firstChannel;
9256 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9257 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9258 close( fd );
9259 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9260 errorText_ = errorStream_.str();
9261 return FAILURE;
9262 }
9263 stream_.nDeviceChannels[mode] = deviceChannels;
9264
9265 // Get the data format mask
9266 int mask;
9267 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9268 if ( result == -1 ) {
9269 close( fd );
9270 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9271 errorText_ = errorStream_.str();
9272 return FAILURE;
9273 }
9274
9275 // Determine how to set the device format.
9276 stream_.userFormat = format;
9277 int deviceFormat = -1;
9278 stream_.doByteSwap[mode] = false;
9279 if ( format == RTAUDIO_SINT8 ) {
9280 if ( mask & AFMT_S8 ) {
9281 deviceFormat = AFMT_S8;
9282 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9283 }
9284 }
9285 else if ( format == RTAUDIO_SINT16 ) {
9286 if ( mask & AFMT_S16_NE ) {
9287 deviceFormat = AFMT_S16_NE;
9288 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9289 }
9290 else if ( mask & AFMT_S16_OE ) {
9291 deviceFormat = AFMT_S16_OE;
9292 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9293 stream_.doByteSwap[mode] = true;
9294 }
9295 }
9296 else if ( format == RTAUDIO_SINT24 ) {
9297 if ( mask & AFMT_S24_NE ) {
9298 deviceFormat = AFMT_S24_NE;
9299 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9300 }
9301 else if ( mask & AFMT_S24_OE ) {
9302 deviceFormat = AFMT_S24_OE;
9303 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9304 stream_.doByteSwap[mode] = true;
9305 }
9306 }
9307 else if ( format == RTAUDIO_SINT32 ) {
9308 if ( mask & AFMT_S32_NE ) {
9309 deviceFormat = AFMT_S32_NE;
9310 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9311 }
9312 else if ( mask & AFMT_S32_OE ) {
9313 deviceFormat = AFMT_S32_OE;
9314 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9315 stream_.doByteSwap[mode] = true;
9316 }
9317 }
9318
9319 if ( deviceFormat == -1 ) {
9320 // The user requested format is not natively supported by the device.
9321 if ( mask & AFMT_S16_NE ) {
9322 deviceFormat = AFMT_S16_NE;
9323 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9324 }
9325 else if ( mask & AFMT_S32_NE ) {
9326 deviceFormat = AFMT_S32_NE;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9328 }
9329 else if ( mask & AFMT_S24_NE ) {
9330 deviceFormat = AFMT_S24_NE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9332 }
9333 else if ( mask & AFMT_S16_OE ) {
9334 deviceFormat = AFMT_S16_OE;
9335 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9336 stream_.doByteSwap[mode] = true;
9337 }
9338 else if ( mask & AFMT_S32_OE ) {
9339 deviceFormat = AFMT_S32_OE;
9340 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9341 stream_.doByteSwap[mode] = true;
9342 }
9343 else if ( mask & AFMT_S24_OE ) {
9344 deviceFormat = AFMT_S24_OE;
9345 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9346 stream_.doByteSwap[mode] = true;
9347 }
9348 else if ( mask & AFMT_S8) {
9349 deviceFormat = AFMT_S8;
9350 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9351 }
9352 }
9353
9354 if ( stream_.deviceFormat[mode] == 0 ) {
9355 // This really shouldn't happen ...
9356 close( fd );
9357 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9358 errorText_ = errorStream_.str();
9359 return FAILURE;
9360 }
9361
9362 // Set the data format.
9363 int temp = deviceFormat;
9364 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9365 if ( result == -1 || deviceFormat != temp ) {
9366 close( fd );
9367 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9368 errorText_ = errorStream_.str();
9369 return FAILURE;
9370 }
9371
9372 // Attempt to set the buffer size. According to OSS, the minimum
9373 // number of buffers is two. The supposed minimum buffer size is 16
9374 // bytes, so that will be our lower bound. The argument to this
9375 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9376 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9377 // We'll check the actual value used near the end of the setup
9378 // procedure.
9379 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9380 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9381 int buffers = 0;
9382 if ( options ) buffers = options->numberOfBuffers;
9383 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9384 if ( buffers < 2 ) buffers = 3;
9385 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9386 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9387 if ( result == -1 ) {
9388 close( fd );
9389 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9390 errorText_ = errorStream_.str();
9391 return FAILURE;
9392 }
9393 stream_.nBuffers = buffers;
9394
9395 // Save buffer size (in sample frames).
9396 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9397 stream_.bufferSize = *bufferSize;
9398
9399 // Set the sample rate.
9400 int srate = sampleRate;
9401 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9402 if ( result == -1 ) {
9403 close( fd );
9404 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9405 errorText_ = errorStream_.str();
9406 return FAILURE;
9407 }
9408
9409 // Verify the sample rate setup worked.
9410 if ( abs( srate - (int)sampleRate ) > 100 ) {
9411 close( fd );
9412 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9413 errorText_ = errorStream_.str();
9414 return FAILURE;
9415 }
9416 stream_.sampleRate = sampleRate;
9417
9418 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9419 // We're doing duplex setup here.
9420 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9421 stream_.nDeviceChannels[0] = deviceChannels;
9422 }
9423
9424 // Set interleaving parameters.
9425 stream_.userInterleaved = true;
9426 stream_.deviceInterleaved[mode] = true;
9427 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9428 stream_.userInterleaved = false;
9429
9430 // Set flags for buffer conversion
9431 stream_.doConvertBuffer[mode] = false;
9432 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9433 stream_.doConvertBuffer[mode] = true;
9434 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9435 stream_.doConvertBuffer[mode] = true;
9436 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9437 stream_.nUserChannels[mode] > 1 )
9438 stream_.doConvertBuffer[mode] = true;
9439
9440 // Allocate the stream handles if necessary and then save.
9441 if ( stream_.apiHandle == 0 ) {
9442 try {
9443 handle = new OssHandle;
9444 }
9445 catch ( std::bad_alloc& ) {
9446 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9447 goto error;
9448 }
9449
9450 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9451 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9452 goto error;
9453 }
9454
9455 stream_.apiHandle = (void *) handle;
9456 }
9457 else {
9458 handle = (OssHandle *) stream_.apiHandle;
9459 }
9460 handle->id[mode] = fd;
9461
9462 // Allocate necessary internal buffers.
9463 unsigned long bufferBytes;
9464 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9465 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9466 if ( stream_.userBuffer[mode] == NULL ) {
9467 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9468 goto error;
9469 }
9470
9471 if ( stream_.doConvertBuffer[mode] ) {
9472
9473 bool makeBuffer = true;
9474 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9475 if ( mode == INPUT ) {
9476 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9477 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9478 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9479 }
9480 }
9481
9482 if ( makeBuffer ) {
9483 bufferBytes *= *bufferSize;
9484 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9485 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9486 if ( stream_.deviceBuffer == NULL ) {
9487 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9488 goto error;
9489 }
9490 }
9491 }
9492
9493 stream_.device[mode] = device;
9494 stream_.state = STREAM_STOPPED;
9495
9496 // Setup the buffer conversion information structure.
9497 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9498
9499 // Setup thread if necessary.
9500 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9501 // We had already set up an output stream.
9502 stream_.mode = DUPLEX;
9503 if ( stream_.device[0] == device ) handle->id[0] = fd;
9504 }
9505 else {
9506 stream_.mode = mode;
9507
9508 // Setup callback thread.
9509 stream_.callbackInfo.object = (void *) this;
9510
9511 // Set the thread attributes for joinable and realtime scheduling
9512 // priority. The higher priority will only take affect if the
9513 // program is run as root or suid.
9514 pthread_attr_t attr;
9515 pthread_attr_init( &attr );
9516 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9517 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9518 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9519 stream_.callbackInfo.doRealtime = true;
9520 struct sched_param param;
9521 int priority = options->priority;
9522 int min = sched_get_priority_min( SCHED_RR );
9523 int max = sched_get_priority_max( SCHED_RR );
9524 if ( priority < min ) priority = min;
9525 else if ( priority > max ) priority = max;
9526 param.sched_priority = priority;
9527
9528 // Set the policy BEFORE the priority. Otherwise it fails.
9529 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9530 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9531 // This is definitely required. Otherwise it fails.
9532 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9533 pthread_attr_setschedparam(&attr, ¶m);
9534 }
9535 else
9536 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9537 #else
9538 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9539 #endif
9540
9541 stream_.callbackInfo.isRunning = true;
9542 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9543 pthread_attr_destroy( &attr );
9544 if ( result ) {
9545 // Failed. Try instead with default attributes.
9546 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9547 if ( result ) {
9548 stream_.callbackInfo.isRunning = false;
9549 errorText_ = "RtApiOss::error creating callback thread!";
9550 goto error;
9551 }
9552 }
9553 }
9554
9555 return SUCCESS;
9556
9557 error:
9558 if ( handle ) {
9559 pthread_cond_destroy( &handle->runnable );
9560 if ( handle->id[0] ) close( handle->id[0] );
9561 if ( handle->id[1] ) close( handle->id[1] );
9562 delete handle;
9563 stream_.apiHandle = 0;
9564 }
9565
9566 for ( int i=0; i<2; i++ ) {
9567 if ( stream_.userBuffer[i] ) {
9568 free( stream_.userBuffer[i] );
9569 stream_.userBuffer[i] = 0;
9570 }
9571 }
9572
9573 if ( stream_.deviceBuffer ) {
9574 free( stream_.deviceBuffer );
9575 stream_.deviceBuffer = 0;
9576 }
9577
9578 stream_.state = STREAM_CLOSED;
9579 return FAILURE;
9580 }
9581
closeStream()9582 void RtApiOss :: closeStream()
9583 {
9584 if ( stream_.state == STREAM_CLOSED ) {
9585 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9586 error( RtAudioError::WARNING );
9587 return;
9588 }
9589
9590 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9591 stream_.callbackInfo.isRunning = false;
9592 MUTEX_LOCK( &stream_.mutex );
9593 if ( stream_.state == STREAM_STOPPED )
9594 pthread_cond_signal( &handle->runnable );
9595 MUTEX_UNLOCK( &stream_.mutex );
9596 pthread_join( stream_.callbackInfo.thread, NULL );
9597
9598 if ( stream_.state == STREAM_RUNNING ) {
9599 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9600 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9601 else
9602 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9603 stream_.state = STREAM_STOPPED;
9604 }
9605
9606 if ( handle ) {
9607 pthread_cond_destroy( &handle->runnable );
9608 if ( handle->id[0] ) close( handle->id[0] );
9609 if ( handle->id[1] ) close( handle->id[1] );
9610 delete handle;
9611 stream_.apiHandle = 0;
9612 }
9613
9614 for ( int i=0; i<2; i++ ) {
9615 if ( stream_.userBuffer[i] ) {
9616 free( stream_.userBuffer[i] );
9617 stream_.userBuffer[i] = 0;
9618 }
9619 }
9620
9621 if ( stream_.deviceBuffer ) {
9622 free( stream_.deviceBuffer );
9623 stream_.deviceBuffer = 0;
9624 }
9625
9626 stream_.mode = UNINITIALIZED;
9627 stream_.state = STREAM_CLOSED;
9628 }
9629
void RtApiOss :: startStream()
{
  // Mark the stream running and wake the callback thread.  OSS itself
  // needs no explicit start: the device begins playing/recording as
  // soon as it is fed (or read for) samples.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference so getStreamTime() measures from now.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  // Release the callback thread, which waits on 'runnable' while the
  // stream is stopped (see callbackEvent()).
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );
}
9655
stopStream()9656 void RtApiOss :: stopStream()
9657 {
9658 verifyStream();
9659 if ( stream_.state == STREAM_STOPPED ) {
9660 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9661 error( RtAudioError::WARNING );
9662 return;
9663 }
9664
9665 MUTEX_LOCK( &stream_.mutex );
9666
9667 // The state might change while waiting on a mutex.
9668 if ( stream_.state == STREAM_STOPPED ) {
9669 MUTEX_UNLOCK( &stream_.mutex );
9670 return;
9671 }
9672
9673 int result = 0;
9674 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9675 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9676
9677 // Flush the output with zeros a few times.
9678 char *buffer;
9679 int samples;
9680 RtAudioFormat format;
9681
9682 if ( stream_.doConvertBuffer[0] ) {
9683 buffer = stream_.deviceBuffer;
9684 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9685 format = stream_.deviceFormat[0];
9686 }
9687 else {
9688 buffer = stream_.userBuffer[0];
9689 samples = stream_.bufferSize * stream_.nUserChannels[0];
9690 format = stream_.userFormat;
9691 }
9692
9693 memset( buffer, 0, samples * formatBytes(format) );
9694 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9695 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9696 if ( result == -1 ) {
9697 errorText_ = "RtApiOss::stopStream: audio write error.";
9698 error( RtAudioError::WARNING );
9699 }
9700 }
9701
9702 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9703 if ( result == -1 ) {
9704 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9705 errorText_ = errorStream_.str();
9706 goto unlock;
9707 }
9708 handle->triggered = false;
9709 }
9710
9711 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9712 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9713 if ( result == -1 ) {
9714 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9715 errorText_ = errorStream_.str();
9716 goto unlock;
9717 }
9718 }
9719
9720 unlock:
9721 stream_.state = STREAM_STOPPED;
9722 MUTEX_UNLOCK( &stream_.mutex );
9723
9724 if ( result != -1 ) return;
9725 error( RtAudioError::SYSTEM_ERROR );
9726 }
9727
abortStream()9728 void RtApiOss :: abortStream()
9729 {
9730 verifyStream();
9731 if ( stream_.state == STREAM_STOPPED ) {
9732 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9733 error( RtAudioError::WARNING );
9734 return;
9735 }
9736
9737 MUTEX_LOCK( &stream_.mutex );
9738
9739 // The state might change while waiting on a mutex.
9740 if ( stream_.state == STREAM_STOPPED ) {
9741 MUTEX_UNLOCK( &stream_.mutex );
9742 return;
9743 }
9744
9745 int result = 0;
9746 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9747 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9748 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9749 if ( result == -1 ) {
9750 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9751 errorText_ = errorStream_.str();
9752 goto unlock;
9753 }
9754 handle->triggered = false;
9755 }
9756
9757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9758 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9759 if ( result == -1 ) {
9760 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9761 errorText_ = errorStream_.str();
9762 goto unlock;
9763 }
9764 }
9765
9766 unlock:
9767 stream_.state = STREAM_STOPPED;
9768 MUTEX_UNLOCK( &stream_.mutex );
9769
9770 if ( result != -1 ) return;
9771 error( RtAudioError::SYSTEM_ERROR );
9772 }
9773
void RtApiOss :: callbackEvent()
{
  // One iteration of the audio callback loop: wait while stopped,
  // invoke the user callback, then write output to / read input from
  // the OSS device(s), converting and byte-swapping as configured.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream() or closeStream() signals 'runnable'.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      // Woken for shutdown (or spuriously while not running): bail out.
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any under/overflow flags set by earlier i/o errors.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return value of 2 requests an immediate abort (no output drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: pre-load the output, then enable input and
      // output simultaneously via the trigger so they start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream time and honor a deferred stop request (1).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
9905
ossCallbackHandler(void * ptr)9906 static void *ossCallbackHandler( void *ptr )
9907 {
9908 CallbackInfo *info = (CallbackInfo *) ptr;
9909 RtApiOss *object = (RtApiOss *) info->object;
9910 bool *isRunning = &info->isRunning;
9911
9912 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9913 if (info->doRealtime) {
9914 std::cerr << "RtAudio oss: " <<
9915 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9916 "running realtime scheduling" << std::endl;
9917 }
9918 #endif
9919
9920 while ( *isRunning == true ) {
9921 pthread_testcancel();
9922 object->callbackEvent();
9923 }
9924
9925 pthread_exit( NULL );
9926 }
9927
9928 //******************** End of __LINUX_OSS__ *********************//
9929 #endif
9930
9931
9932 // *************************************************** //
9933 //
9934 // Protected common (OS-independent) RtAudio methods.
9935 //
9936 // *************************************************** //
9937
9938 // This method can be modified to control the behavior of error
9939 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatcher: route errorText_ to a registered user
  // callback when one exists; otherwise print warnings to stderr (if
  // enabled) and throw RtAudioError for non-warning errors.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now: the abortStream() call below may overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9969
verifyStream()9970 void RtApi :: verifyStream()
9971 {
9972 if ( stream_.state == STREAM_CLOSED ) {
9973 errorText_ = "RtApi:: a stream is not open!";
9974 error( RtAudioError::INVALID_USE );
9975 }
9976 }
9977
clearStreamInfo()9978 void RtApi :: clearStreamInfo()
9979 {
9980 stream_.mode = UNINITIALIZED;
9981 stream_.state = STREAM_CLOSED;
9982 stream_.sampleRate = 0;
9983 stream_.bufferSize = 0;
9984 stream_.nBuffers = 0;
9985 stream_.userFormat = 0;
9986 stream_.userInterleaved = true;
9987 stream_.streamTime = 0.0;
9988 stream_.apiHandle = 0;
9989 stream_.deviceBuffer = 0;
9990 stream_.callbackInfo.callback = 0;
9991 stream_.callbackInfo.userData = 0;
9992 stream_.callbackInfo.isRunning = false;
9993 stream_.callbackInfo.errorCallback = 0;
9994 for ( int i=0; i<2; i++ ) {
9995 stream_.device[i] = 11111;
9996 stream_.doConvertBuffer[i] = false;
9997 stream_.deviceInterleaved[i] = true;
9998 stream_.doByteSwap[i] = false;
9999 stream_.nUserChannels[i] = 0;
10000 stream_.nDeviceChannels[i] = 0;
10001 stream_.channelOffset[i] = 0;
10002 stream_.deviceFormat[i] = 0;
10003 stream_.latency[i] = 0;
10004 stream_.userBuffer[i] = 0;
10005 stream_.convertInfo[i].channels = 0;
10006 stream_.convertInfo[i].inJump = 0;
10007 stream_.convertInfo[i].outJump = 0;
10008 stream_.convertInfo[i].inFormat = 0;
10009 stream_.convertInfo[i].outFormat = 0;
10010 stream_.convertInfo[i].inOffset.clear();
10011 stream_.convertInfo[i].outOffset.clear();
10012 }
10013 }
10014
formatBytes(RtAudioFormat format)10015 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10016 {
10017 if ( format == RTAUDIO_SINT16 )
10018 return 2;
10019 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10020 return 4;
10021 else if ( format == RTAUDIO_FLOAT64 )
10022 return 8;
10023 else if ( format == RTAUDIO_SINT24 )
10024 return 3;
10025 else if ( format == RTAUDIO_SINT8 )
10026 return 1;
10027
10028 errorText_ = "RtApi::formatBytes: undefined format.";
10029 error( RtAudioError::WARNING );
10030
10031 return 0;
10032 }
10033
setConvertInfo(StreamMode mode,unsigned int firstChannel)10034 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10035 {
10036 if ( mode == INPUT ) { // convert device to user buffer
10037 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10038 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10039 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10040 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10041 }
10042 else { // convert user to device buffer
10043 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10044 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10045 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10046 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10047 }
10048
10049 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10050 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10051 else
10052 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10053
10054 // Set up the interleave/deinterleave offsets.
10055 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10056 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10057 ( mode == INPUT && stream_.userInterleaved ) ) {
10058 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10059 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10060 stream_.convertInfo[mode].outOffset.push_back( k );
10061 stream_.convertInfo[mode].inJump = 1;
10062 }
10063 }
10064 else {
10065 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10066 stream_.convertInfo[mode].inOffset.push_back( k );
10067 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10068 stream_.convertInfo[mode].outJump = 1;
10069 }
10070 }
10071 }
10072 else { // no (de)interleaving
10073 if ( stream_.userInterleaved ) {
10074 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10075 stream_.convertInfo[mode].inOffset.push_back( k );
10076 stream_.convertInfo[mode].outOffset.push_back( k );
10077 }
10078 }
10079 else {
10080 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10081 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10082 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10083 stream_.convertInfo[mode].inJump = 1;
10084 stream_.convertInfo[mode].outJump = 1;
10085 }
10086 }
10087 }
10088
10089 // Add channel offset.
10090 if ( firstChannel > 0 ) {
10091 if ( stream_.deviceInterleaved[mode] ) {
10092 if ( mode == OUTPUT ) {
10093 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10094 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10095 }
10096 else {
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10098 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10099 }
10100 }
10101 else {
10102 if ( mode == OUTPUT ) {
10103 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10104 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10105 }
10106 else {
10107 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10108 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10109 }
10110 }
10111 }
10112 }
10113
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)10114 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10115 {
10116 // This function does format conversion, input/output channel compensation, and
10117 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10118 // the lower three bytes of a 32-bit integer.
10119
10120 // Clear our device buffer when in/out duplex device channels are different
10121 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10122 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10123 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10124
10125 int j;
10126 if (info.outFormat == RTAUDIO_FLOAT64) {
10127 Float64 scale;
10128 Float64 *out = (Float64 *)outBuffer;
10129
10130 if (info.inFormat == RTAUDIO_SINT8) {
10131 signed char *in = (signed char *)inBuffer;
10132 scale = 1.0 / 127.5;
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10134 for (j=0; j<info.channels; j++) {
10135 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10136 out[info.outOffset[j]] += 0.5;
10137 out[info.outOffset[j]] *= scale;
10138 }
10139 in += info.inJump;
10140 out += info.outJump;
10141 }
10142 }
10143 else if (info.inFormat == RTAUDIO_SINT16) {
10144 Int16 *in = (Int16 *)inBuffer;
10145 scale = 1.0 / 32767.5;
10146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10147 for (j=0; j<info.channels; j++) {
10148 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10149 out[info.outOffset[j]] += 0.5;
10150 out[info.outOffset[j]] *= scale;
10151 }
10152 in += info.inJump;
10153 out += info.outJump;
10154 }
10155 }
10156 else if (info.inFormat == RTAUDIO_SINT24) {
10157 Int24 *in = (Int24 *)inBuffer;
10158 scale = 1.0 / 8388607.5;
10159 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10160 for (j=0; j<info.channels; j++) {
10161 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10162 out[info.outOffset[j]] += 0.5;
10163 out[info.outOffset[j]] *= scale;
10164 }
10165 in += info.inJump;
10166 out += info.outJump;
10167 }
10168 }
10169 else if (info.inFormat == RTAUDIO_SINT32) {
10170 Int32 *in = (Int32 *)inBuffer;
10171 scale = 1.0 / 2147483647.5;
10172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10173 for (j=0; j<info.channels; j++) {
10174 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10175 out[info.outOffset[j]] += 0.5;
10176 out[info.outOffset[j]] *= scale;
10177 }
10178 in += info.inJump;
10179 out += info.outJump;
10180 }
10181 }
10182 else if (info.inFormat == RTAUDIO_FLOAT32) {
10183 Float32 *in = (Float32 *)inBuffer;
10184 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10185 for (j=0; j<info.channels; j++) {
10186 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10187 }
10188 in += info.inJump;
10189 out += info.outJump;
10190 }
10191 }
10192 else if (info.inFormat == RTAUDIO_FLOAT64) {
10193 // Channel compensation and/or (de)interleaving only.
10194 Float64 *in = (Float64 *)inBuffer;
10195 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10196 for (j=0; j<info.channels; j++) {
10197 out[info.outOffset[j]] = in[info.inOffset[j]];
10198 }
10199 in += info.inJump;
10200 out += info.outJump;
10201 }
10202 }
10203 }
10204 else if (info.outFormat == RTAUDIO_FLOAT32) {
10205 Float32 scale;
10206 Float32 *out = (Float32 *)outBuffer;
10207
10208 if (info.inFormat == RTAUDIO_SINT8) {
10209 signed char *in = (signed char *)inBuffer;
10210 scale = (Float32) ( 1.0 / 127.5 );
10211 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10212 for (j=0; j<info.channels; j++) {
10213 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10214 out[info.outOffset[j]] += 0.5;
10215 out[info.outOffset[j]] *= scale;
10216 }
10217 in += info.inJump;
10218 out += info.outJump;
10219 }
10220 }
10221 else if (info.inFormat == RTAUDIO_SINT16) {
10222 Int16 *in = (Int16 *)inBuffer;
10223 scale = (Float32) ( 1.0 / 32767.5 );
10224 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10225 for (j=0; j<info.channels; j++) {
10226 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10227 out[info.outOffset[j]] += 0.5;
10228 out[info.outOffset[j]] *= scale;
10229 }
10230 in += info.inJump;
10231 out += info.outJump;
10232 }
10233 }
10234 else if (info.inFormat == RTAUDIO_SINT24) {
10235 Int24 *in = (Int24 *)inBuffer;
10236 scale = (Float32) ( 1.0 / 8388607.5 );
10237 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10238 for (j=0; j<info.channels; j++) {
10239 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10240 out[info.outOffset[j]] += 0.5;
10241 out[info.outOffset[j]] *= scale;
10242 }
10243 in += info.inJump;
10244 out += info.outJump;
10245 }
10246 }
10247 else if (info.inFormat == RTAUDIO_SINT32) {
10248 Int32 *in = (Int32 *)inBuffer;
10249 scale = (Float32) ( 1.0 / 2147483647.5 );
10250 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10251 for (j=0; j<info.channels; j++) {
10252 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10253 out[info.outOffset[j]] += 0.5;
10254 out[info.outOffset[j]] *= scale;
10255 }
10256 in += info.inJump;
10257 out += info.outJump;
10258 }
10259 }
10260 else if (info.inFormat == RTAUDIO_FLOAT32) {
10261 // Channel compensation and/or (de)interleaving only.
10262 Float32 *in = (Float32 *)inBuffer;
10263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10264 for (j=0; j<info.channels; j++) {
10265 out[info.outOffset[j]] = in[info.inOffset[j]];
10266 }
10267 in += info.inJump;
10268 out += info.outJump;
10269 }
10270 }
10271 else if (info.inFormat == RTAUDIO_FLOAT64) {
10272 Float64 *in = (Float64 *)inBuffer;
10273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10274 for (j=0; j<info.channels; j++) {
10275 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10276 }
10277 in += info.inJump;
10278 out += info.outJump;
10279 }
10280 }
10281 }
10282 else if (info.outFormat == RTAUDIO_SINT32) {
10283 Int32 *out = (Int32 *)outBuffer;
10284 if (info.inFormat == RTAUDIO_SINT8) {
10285 signed char *in = (signed char *)inBuffer;
10286 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10287 for (j=0; j<info.channels; j++) {
10288 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10289 out[info.outOffset[j]] <<= 24;
10290 }
10291 in += info.inJump;
10292 out += info.outJump;
10293 }
10294 }
10295 else if (info.inFormat == RTAUDIO_SINT16) {
10296 Int16 *in = (Int16 *)inBuffer;
10297 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10298 for (j=0; j<info.channels; j++) {
10299 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10300 out[info.outOffset[j]] <<= 16;
10301 }
10302 in += info.inJump;
10303 out += info.outJump;
10304 }
10305 }
10306 else if (info.inFormat == RTAUDIO_SINT24) {
10307 Int24 *in = (Int24 *)inBuffer;
10308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10309 for (j=0; j<info.channels; j++) {
10310 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10311 out[info.outOffset[j]] <<= 8;
10312 }
10313 in += info.inJump;
10314 out += info.outJump;
10315 }
10316 }
10317 else if (info.inFormat == RTAUDIO_SINT32) {
10318 // Channel compensation and/or (de)interleaving only.
10319 Int32 *in = (Int32 *)inBuffer;
10320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10321 for (j=0; j<info.channels; j++) {
10322 out[info.outOffset[j]] = in[info.inOffset[j]];
10323 }
10324 in += info.inJump;
10325 out += info.outJump;
10326 }
10327 }
10328 else if (info.inFormat == RTAUDIO_FLOAT32) {
10329 Float32 *in = (Float32 *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10333 }
10334 in += info.inJump;
10335 out += info.outJump;
10336 }
10337 }
10338 else if (info.inFormat == RTAUDIO_FLOAT64) {
10339 Float64 *in = (Float64 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10343 }
10344 in += info.inJump;
10345 out += info.outJump;
10346 }
10347 }
10348 }
10349 else if (info.outFormat == RTAUDIO_SINT24) {
10350 Int24 *out = (Int24 *)outBuffer;
10351 if (info.inFormat == RTAUDIO_SINT8) {
10352 signed char *in = (signed char *)inBuffer;
10353 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10354 for (j=0; j<info.channels; j++) {
10355 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10356 //out[info.outOffset[j]] <<= 16;
10357 }
10358 in += info.inJump;
10359 out += info.outJump;
10360 }
10361 }
10362 else if (info.inFormat == RTAUDIO_SINT16) {
10363 Int16 *in = (Int16 *)inBuffer;
10364 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10365 for (j=0; j<info.channels; j++) {
10366 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10367 //out[info.outOffset[j]] <<= 8;
10368 }
10369 in += info.inJump;
10370 out += info.outJump;
10371 }
10372 }
10373 else if (info.inFormat == RTAUDIO_SINT24) {
10374 // Channel compensation and/or (de)interleaving only.
10375 Int24 *in = (Int24 *)inBuffer;
10376 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10377 for (j=0; j<info.channels; j++) {
10378 out[info.outOffset[j]] = in[info.inOffset[j]];
10379 }
10380 in += info.inJump;
10381 out += info.outJump;
10382 }
10383 }
10384 else if (info.inFormat == RTAUDIO_SINT32) {
10385 Int32 *in = (Int32 *)inBuffer;
10386 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10387 for (j=0; j<info.channels; j++) {
10388 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10389 //out[info.outOffset[j]] >>= 8;
10390 }
10391 in += info.inJump;
10392 out += info.outJump;
10393 }
10394 }
10395 else if (info.inFormat == RTAUDIO_FLOAT32) {
10396 Float32 *in = (Float32 *)inBuffer;
10397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10398 for (j=0; j<info.channels; j++) {
10399 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10400 }
10401 in += info.inJump;
10402 out += info.outJump;
10403 }
10404 }
10405 else if (info.inFormat == RTAUDIO_FLOAT64) {
10406 Float64 *in = (Float64 *)inBuffer;
10407 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10408 for (j=0; j<info.channels; j++) {
10409 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10410 }
10411 in += info.inJump;
10412 out += info.outJump;
10413 }
10414 }
10415 }
10416 else if (info.outFormat == RTAUDIO_SINT16) {
10417 Int16 *out = (Int16 *)outBuffer;
10418 if (info.inFormat == RTAUDIO_SINT8) {
10419 signed char *in = (signed char *)inBuffer;
10420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10421 for (j=0; j<info.channels; j++) {
10422 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10423 out[info.outOffset[j]] <<= 8;
10424 }
10425 in += info.inJump;
10426 out += info.outJump;
10427 }
10428 }
10429 else if (info.inFormat == RTAUDIO_SINT16) {
10430 // Channel compensation and/or (de)interleaving only.
10431 Int16 *in = (Int16 *)inBuffer;
10432 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10433 for (j=0; j<info.channels; j++) {
10434 out[info.outOffset[j]] = in[info.inOffset[j]];
10435 }
10436 in += info.inJump;
10437 out += info.outJump;
10438 }
10439 }
10440 else if (info.inFormat == RTAUDIO_SINT24) {
10441 Int24 *in = (Int24 *)inBuffer;
10442 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10443 for (j=0; j<info.channels; j++) {
10444 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10445 }
10446 in += info.inJump;
10447 out += info.outJump;
10448 }
10449 }
10450 else if (info.inFormat == RTAUDIO_SINT32) {
10451 Int32 *in = (Int32 *)inBuffer;
10452 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10453 for (j=0; j<info.channels; j++) {
10454 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10455 }
10456 in += info.inJump;
10457 out += info.outJump;
10458 }
10459 }
10460 else if (info.inFormat == RTAUDIO_FLOAT32) {
10461 Float32 *in = (Float32 *)inBuffer;
10462 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10463 for (j=0; j<info.channels; j++) {
10464 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10465 }
10466 in += info.inJump;
10467 out += info.outJump;
10468 }
10469 }
10470 else if (info.inFormat == RTAUDIO_FLOAT64) {
10471 Float64 *in = (Float64 *)inBuffer;
10472 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10473 for (j=0; j<info.channels; j++) {
10474 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10475 }
10476 in += info.inJump;
10477 out += info.outJump;
10478 }
10479 }
10480 }
10481 else if (info.outFormat == RTAUDIO_SINT8) {
10482 signed char *out = (signed char *)outBuffer;
10483 if (info.inFormat == RTAUDIO_SINT8) {
10484 // Channel compensation and/or (de)interleaving only.
10485 signed char *in = (signed char *)inBuffer;
10486 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10487 for (j=0; j<info.channels; j++) {
10488 out[info.outOffset[j]] = in[info.inOffset[j]];
10489 }
10490 in += info.inJump;
10491 out += info.outJump;
10492 }
10493 }
10494 if (info.inFormat == RTAUDIO_SINT16) {
10495 Int16 *in = (Int16 *)inBuffer;
10496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10497 for (j=0; j<info.channels; j++) {
10498 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10499 }
10500 in += info.inJump;
10501 out += info.outJump;
10502 }
10503 }
10504 else if (info.inFormat == RTAUDIO_SINT24) {
10505 Int24 *in = (Int24 *)inBuffer;
10506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10507 for (j=0; j<info.channels; j++) {
10508 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10509 }
10510 in += info.inJump;
10511 out += info.outJump;
10512 }
10513 }
10514 else if (info.inFormat == RTAUDIO_SINT32) {
10515 Int32 *in = (Int32 *)inBuffer;
10516 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517 for (j=0; j<info.channels; j++) {
10518 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10519 }
10520 in += info.inJump;
10521 out += info.outJump;
10522 }
10523 }
10524 else if (info.inFormat == RTAUDIO_FLOAT32) {
10525 Float32 *in = (Float32 *)inBuffer;
10526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10527 for (j=0; j<info.channels; j++) {
10528 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10529 }
10530 in += info.inJump;
10531 out += info.outJump;
10532 }
10533 }
10534 else if (info.inFormat == RTAUDIO_FLOAT64) {
10535 Float64 *in = (Float64 *)inBuffer;
10536 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10537 for (j=0; j<info.channels; j++) {
10538 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10539 }
10540 in += info.inJump;
10541 out += info.outJump;
10542 }
10543 }
10544 }
10545 }
10546
10547 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10548 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10549 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10550
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10551 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10552 {
10553 char val;
10554 char *ptr;
10555
10556 ptr = buffer;
10557 if ( format == RTAUDIO_SINT16 ) {
10558 for ( unsigned int i=0; i<samples; i++ ) {
10559 // Swap 1st and 2nd bytes.
10560 val = *(ptr);
10561 *(ptr) = *(ptr+1);
10562 *(ptr+1) = val;
10563
10564 // Increment 2 bytes.
10565 ptr += 2;
10566 }
10567 }
10568 else if ( format == RTAUDIO_SINT32 ||
10569 format == RTAUDIO_FLOAT32 ) {
10570 for ( unsigned int i=0; i<samples; i++ ) {
10571 // Swap 1st and 4th bytes.
10572 val = *(ptr);
10573 *(ptr) = *(ptr+3);
10574 *(ptr+3) = val;
10575
10576 // Swap 2nd and 3rd bytes.
10577 ptr += 1;
10578 val = *(ptr);
10579 *(ptr) = *(ptr+1);
10580 *(ptr+1) = val;
10581
10582 // Increment 3 more bytes.
10583 ptr += 3;
10584 }
10585 }
10586 else if ( format == RTAUDIO_SINT24 ) {
10587 for ( unsigned int i=0; i<samples; i++ ) {
10588 // Swap 1st and 3rd bytes.
10589 val = *(ptr);
10590 *(ptr) = *(ptr+2);
10591 *(ptr+2) = val;
10592
10593 // Increment 2 more bytes.
10594 ptr += 2;
10595 }
10596 }
10597 else if ( format == RTAUDIO_FLOAT64 ) {
10598 for ( unsigned int i=0; i<samples; i++ ) {
10599 // Swap 1st and 8th bytes
10600 val = *(ptr);
10601 *(ptr) = *(ptr+7);
10602 *(ptr+7) = val;
10603
10604 // Swap 2nd and 7th bytes
10605 ptr += 1;
10606 val = *(ptr);
10607 *(ptr) = *(ptr+5);
10608 *(ptr+5) = val;
10609
10610 // Swap 3rd and 6th bytes
10611 ptr += 1;
10612 val = *(ptr);
10613 *(ptr) = *(ptr+3);
10614 *(ptr+3) = val;
10615
10616 // Swap 4th and 5th bytes
10617 ptr += 1;
10618 val = *(ptr);
10619 *(ptr) = *(ptr+1);
10620 *(ptr+1) = val;
10621
10622 // Increment 5 more bytes.
10623 ptr += 5;
10624 }
10625 }
10626 }
10627
10628 // Indentation settings for Vim and Emacs
10629 //
10630 // Local Variables:
10631 // c-basic-offset: 2
10632 // indent-tabs-mode: nil
10633 // End:
10634 //
10635 // vim: et sts=2 sw=2
10636
10637