1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
15
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
23
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
26
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
31
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 */
40 /************************************************************************/
41
42 // RtAudio: Version 5.1.0
43
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <cmath>
#include <algorithm>
#include <vector>
51
// Static variable definitions.
// Table of candidate sample rates probed by the API implementations when a
// device only reports a min/max range.  MAX_SAMPLE_RATES must match the
// number of entries in SAMPLE_RATES.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
58
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows builds: the stream mutex is a Win32 CRITICAL_SECTION.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

// Narrow-string overload: the text is already 8-bit, so no encoding
// conversion is required — just wrap it in a std::string.
static std::string convertCharPointerToStdString( const char *text )
{
  std::string converted( text );
  return converted;
}
71
// Wide-string overload: convert a NUL-terminated UTF-16 string to a
// UTF-8 encoded std::string via the Win32 conversion API.
static std::string convertCharPointerToStdString( const wchar_t *text )
{
  // First call with a NULL buffer returns the required size in bytes,
  // including the NUL terminator (cchWideChar == -1 means NUL-terminated).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  // Allocate length-1 characters; std::string provides its own terminator.
  std::string s( length-1, '\0' );
  // NOTE(review): the second call writes `length` bytes (payload + NUL)
  // into a buffer of size() == length-1; the terminating write lands on
  // the string's terminator slot — verify this is intended.
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}
79
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // No audio API compiled in: dummy definitions so the code still builds.
  // Only INITIALIZE/DESTROY are provided here — presumably LOCK/UNLOCK are
  // never reached in a dummy-only build (TODO confirm).
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90
91 // *************************************************** //
92 //
93 // RtAudio definitions.
94 //
95 // *************************************************** //
96
//! Return the RtAudio library version string (RTAUDIO_VERSION, defined in RtAudio.h).
std::string RtAudio :: getVersion( void )
{
  return RTAUDIO_VERSION;
}
101
102 // Define API names and display names.
103 // Must be in same order as API enum.
extern "C" {
  // Table of { identifier, display name } pairs, one row per RtAudio::Api
  // enumerator.  Indexed directly by the enum value, so the row order MUST
  // match the order of the API enum in RtAudio.h.
  const char* rtaudio_api_names[][2] = {
    { "unspecified" , "Unknown" },
    { "alsa" , "ALSA" },
    { "pulse" , "Pulse" },
    { "oss" , "OpenSoundSystem" },
    { "jack" , "Jack" },
    { "core" , "CoreAudio" },
    { "wasapi" , "WASAPI" },
    { "asio" , "ASIO" },
    { "ds" , "DirectSound" },
    { "dummy" , "Dummy" },
  };
  const unsigned int rtaudio_num_api_names =
    sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

  // The order here will control the order of RtAudio's API search in
  // the constructor.
  extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
    RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
    RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
    RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
    RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
    RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
    RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
    RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
    RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
    RtAudio::RTAUDIO_DUMMY,
#endif
    RtAudio::UNSPECIFIED,  // sentinel terminator, excluded from the count below
  };
  // The trailing UNSPECIFIED sentinel is not counted (hence the -1).
  extern "C" const unsigned int rtaudio_num_compiled_apis =
    sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
155
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization has a
// public (accessible) constructor, so instantiating StaticAssert<false>
// fails to compile.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
163
getCompiledApi(std::vector<RtAudio::Api> & apis)164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 {
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
168 }
169
getApiName(RtAudio::Api api)170 std::string RtAudio :: getApiName( RtAudio::Api api )
171 {
172 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return "";
174 return rtaudio_api_names[api][0];
175 }
176
getApiDisplayName(RtAudio::Api api)177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 {
179 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return "Unknown";
181 return rtaudio_api_names[api][1];
182 }
183
getCompiledApiByName(const std::string & name)184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
185 {
186 unsigned int i=0;
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
191 }
192
// Instantiate the RtApi subclass implementing the requested API, replacing
// any existing instance.  If the requested API was not compiled into this
// build, rtapi_ is left null and the caller decides what to do next.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;  // stays null unless one of the compiled branches below matches

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
236
// Construct an RtAudio instance.  If a specific API is requested, try it
// first; otherwise (or on failure) iterate through the compiled APIs in
// search order and keep the first one that reports at least one device.
// Throws RtAudioError if no compiled API support exists at all.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
269
~RtAudio()270 RtAudio :: ~RtAudio()
271 {
272 if ( rtapi_ )
273 delete rtapi_;
274 }
275
// Thin forwarding wrapper: delegates directly to the selected API's
// openStream() implementation.  rtapi_ is non-null here because the
// constructor throws when no API could be instantiated.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
{
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
}
288
289 // *************************************************** //
290 //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
293 //
294 // *************************************************** //
295
RtApi()296 RtApi :: RtApi()
297 {
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
306 }
307
// Base-class destructor: release the stream mutex.  Subclass destructors
// are responsible for closing any open stream before this runs.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
312
// Validate all user-supplied stream parameters, then open the requested
// output and/or input side via the API-specific probeDeviceOpen().  On any
// validation or probe failure an error is reported and the stream remains
// closed.  On success the callback info is recorded and the stream state
// becomes STREAM_STOPPED.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // A zero byte count means the format value is not one of the defined
  // RTAUDIO_* format constants.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Range-check the device indices against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first (if requested) ...
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // ... then the input side; if it fails after output succeeded, roll back
  // the half-open stream before reporting the error.
  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the number of buffers actually chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
405
// Return the index of the system default input device.  This base-class
// fallback always returns device 0; subclasses override it where the
// underlying API can report a real default.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
411
// Return the index of the system default output device.  This base-class
// fallback always returns device 0; subclasses override it where the
// underlying API can report a real default.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
417
closeStream(void)418 void RtApi :: closeStream( void )
419 {
420 // MUST be implemented in subclasses!
421 return;
422 }
423
// Attempt to open one direction (input or output) of a stream on the given
// device.  This default body always fails — every concrete API subclass
// MUST override it.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
432
// Advance the stream-time counter by one buffer's duration.
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember when this tick happened so getStreamTime() can interpolate
  // between buffer callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
445
getStreamLatency(void)446 long RtApi :: getStreamLatency( void )
447 {
448 verifyStream();
449
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
455
456 return totalLatency;
457 }
458
// Return the number of seconds of audio processed by the stream.  When
// gettimeofday() is available, the wall-clock time elapsed since the last
// buffer tick is added for a finer-grained estimate.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Interpolation only makes sense while running and after the first tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
481
// Set the stream time to an explicit value.  Negative values are silently
// ignored; the tick timestamp is refreshed regardless so subsequent
// getStreamTime() interpolation restarts from now.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
492
// Return the sample rate of the currently open stream (verifyStream()
// raises an error if no stream is open).
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream();

  return stream_.sampleRate;
}
499
500
501 // *************************************************** //
502 //
503 // OS/API-specific methods.
504 //
505 // *************************************************** //
506
507 #if defined(__MACOSX_CORE__)
508
509 // The OS X CoreAudio API is designed to use a separate callback
510 // procedure for each of its audio devices. A single RtAudio duplex
511 // stream using two different devices is supported here, though it
512 // cannot be guaranteed to always behave correctly because we cannot
513 // synchronize these two callbacks.
514 //
515 // A property listener is installed for over/underrun information.
516 // However, no functionality is currently provided to allow property
517 // listeners to trigger user handlers because it is unclear what could
518 // be done if a critical stream parameter (buffer size, sample rate,
519 // device disconnect) notification arrived. The listeners entail
520 // quite a bit of extra code and most likely, a user program wouldn't
521 // be prepared for the result anyway. However, we do provide a flag
522 // to the client callback function to inform of an over/underrun.
523
524 // A structure to hold various information related to the CoreAudio API
525 // implementation.
// A structure to hold various information related to the CoreAudio API
// implementation.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids, index 0 = output, 1 = input
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];  // registered IOProc IDs (10.5+ API)
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags reported to the user callback
  char *deviceBuffer;     // interleave/conversion buffer shared by both directions
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  // Default-construct with no devices, one stream per direction, and no
  // xruns recorded.  (condition is initialized elsewhere.)
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
542
// CoreAudio backend constructor.  On 10.6+ it must register a (null) run
// loop with the HAL before any other property access.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
561
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
569
// Query the HAL for the size of the device-ID list and derive the count
// from it; returns 0 (with a warning) on any HAL error.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
584
getDefaultInputDevice(void)585 unsigned int RtApiCore :: getDefaultInputDevice( void )
586 {
587 unsigned int nDevices = getDeviceCount();
588 if ( nDevices <= 1 ) return 0;
589
590 AudioDeviceID id;
591 UInt32 dataSize = sizeof( AudioDeviceID );
592 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
593 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
594 if ( result != noErr ) {
595 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
596 error( RtAudioError::WARNING );
597 return 0;
598 }
599
600 dataSize *= nDevices;
601 AudioDeviceID deviceList[ nDevices ];
602 property.mSelector = kAudioHardwarePropertyDevices;
603 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
604 if ( result != noErr ) {
605 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
606 error( RtAudioError::WARNING );
607 return 0;
608 }
609
610 for ( unsigned int i=0; i<nDevices; i++ )
611 if ( id == deviceList[i] ) return i;
612
613 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
614 error( RtAudioError::WARNING );
615 return 0;
616 }
617
getDefaultOutputDevice(void)618 unsigned int RtApiCore :: getDefaultOutputDevice( void )
619 {
620 unsigned int nDevices = getDeviceCount();
621 if ( nDevices <= 1 ) return 0;
622
623 AudioDeviceID id;
624 UInt32 dataSize = sizeof( AudioDeviceID );
625 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
627 if ( result != noErr ) {
628 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
629 error( RtAudioError::WARNING );
630 return 0;
631 }
632
633 dataSize = sizeof( AudioDeviceID ) * nDevices;
634 AudioDeviceID deviceList[ nDevices ];
635 property.mSelector = kAudioHardwarePropertyDevices;
636 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
637 if ( result != noErr ) {
638 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
639 error( RtAudioError::WARNING );
640 return 0;
641 }
642
643 for ( unsigned int i=0; i<nDevices; i++ )
644 if ( id == deviceList[i] ) return i;
645
646 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
647 error( RtAudioError::WARNING );
648 return 0;
649 }
650
// Probe a CoreAudio device and fill a DeviceInfo structure: name
// (manufacturer + product), channel counts per direction, supported
// sample rates, and native format.  info.probed stays false if any HAL
// query fails along the way.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // NOTE(review): variable-length arrays (here and for rangeList below)
  // are a GCC/Clang extension, not standard C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst case for UTF-8 from UTF-16 is 3 bytes per UTF-16 code unit.
  // NOTE(review): the CFStringGetCString return value is not checked;
  // on failure the buffer contents would be undefined — verify upstream.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum the channels of all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information: sum the channels of all input streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Preferred rate: the highest reported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  if ( haveValueRange ) {
    // Expand the conservative [min, max] range using the standard table.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
891
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
898 void* infoPointer )
899 {
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
901
902 RtApiCore *object = (RtApiCore *) info->object;
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
905 else
906 return kAudioHardwareNoError;
907 }
908
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 UInt32 nAddresses,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
913 {
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
919 else
920 handle->xrun[0] = true;
921 }
922 }
923
924 return kAudioHardwareNoError;
925 }
926
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
930 void* ratePointer )
931 {
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
939 }
940
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
945 {
946 // Get device ID
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
951 return FAILURE;
952 }
953
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
957 return FAILURE;
958 }
959
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
969 return FAILURE;
970 }
971
972 AudioDeviceID id = deviceList[ device ];
973
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
977 isInput = true;
978 property.mScope = kAudioDevicePropertyScopeInput;
979 }
980 else
981 property.mScope = kAudioDevicePropertyScopeOutput;
982
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
985 dataSize = 0;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
991 return FAILURE;
992 }
993
994 // Allocate the AudioBufferList.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
998 return FAILURE;
999 }
1000
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1003 free( bufferList );
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1006 return FAILURE;
1007 }
1008
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1021
1022 // First check that the device supports the requested number of
1023 // channels.
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 free( bufferList );
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1032 return FAILURE;
1033 }
1034
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
1042 foundStream = true;
1043 break;
1044 }
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1047 }
1048
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1052 monoMode = true;
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1058 }
1059
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
1069 streamCount++;
1070 }
1071 }
1072
1073 free( bufferList );
1074
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
1084 return FAILURE;
1085 }
1086
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1101 return FAILURE;
1102 }
1103
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1110 return FAILURE;
1111 }
1112
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1115
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 pid_t hog_pid;
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
1125 return FAILURE;
1126 }
1127
1128 if ( hog_pid != getpid() ) {
1129 hog_pid = getpid();
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1134 return FAILURE;
1135 }
1136 }
1137 }
1138
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1147 return FAILURE;
1148 }
1149
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152
1153 // Set a property listener for the sample rate change
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1160 return FAILURE;
1161 }
1162
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1169 return FAILURE;
1170 }
1171
1172 // Now wait until the reported nominal rate is what we just set.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1177 usleep( 5000 );
1178 }
1179
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1186 return FAILURE;
1187 }
1188 }
1189
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1199 return FAILURE;
1200 }
1201
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1209 }
1210
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1214 }
1215
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1221 return FAILURE;
1222 }
1223 }
1224
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1231 return FAILURE;
1232 }
1233
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1239
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1244 UInt32 formatFlags;
1245
1246 // We'll try higher bit rates first and then work our way down.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1268 else
1269 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1279 break;
1280 }
1281 }
1282
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1286 return FAILURE;
1287 }
1288 } // done setting virtual/physical formats.
1289
1290 // Get the stream / device latency.
1291 UInt32 latency;
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 else {
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1301 }
1302 }
1303
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1308
1309 // From the CoreAudio documentation, PCM data must be supplied as
1310 // 32-bit floats.
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1335 }
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1338
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1342 try {
1343 handle = new CoreHandle;
1344 }
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1347 goto error;
1348 }
1349
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1352 goto error;
1353 }
1354 stream_.apiHandle = (void *) handle;
1355 }
1356 else
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1361
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1370 goto error;
1371 }
1372
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers". However, we can't do this if using multiple
1375 // streams.
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1384 }
1385 }
1386
1387 if ( makeBuffer ) {
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1393 goto error;
1394 }
1395 }
1396 }
1397
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1402
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1407 }
1408
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1412 else {
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 #else
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 #endif
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1422 goto error;
1423 }
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1426 else
1427 stream_.mode = mode;
1428 }
1429
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434
1435 return SUCCESS;
1436
1437 error:
1438 if ( handle ) {
1439 pthread_cond_destroy( &handle->condition );
1440 delete handle;
1441 stream_.apiHandle = 0;
1442 }
1443
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1448 }
1449 }
1450
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1454 }
1455
1456 stream_.state = STREAM_CLOSED;
1457 return FAILURE;
1458 }
1459
//! Close the stream: remove listeners and IOProcs, free all buffers,
//! and reset the stream structure to the closed state.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Tear down the output side (device index 0) first.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      // Remove the processor overload (xrun) listener installed at open time.
      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
      // Stop the device if still running, then destroy its IOProc ID.
      if ( stream_.state == STREAM_RUNNING )
        AudioDeviceStop( handle->id[0], handle->procId[0] );
      AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
      if ( stream_.state == STREAM_RUNNING )
        AudioDeviceStop( handle->id[0], callbackHandler );
      AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
    }
  }

  // Tear down the input side (device index 1) unless it shares the
  // output device (in which case the single IOProc above covered it).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if (handle) {
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };

      property.mSelector = kAudioDeviceProcessorOverload;
      property.mScope = kAudioObjectPropertyScopeGlobal;
      if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
        error( RtAudioError::WARNING );
      }

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
      if ( stream_.state == STREAM_RUNNING )
        AudioDeviceStop( handle->id[1], handle->procId[1] );
      AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else // deprecated behaviour
      if ( stream_.state == STREAM_RUNNING )
        AudioDeviceStop( handle->id[1], callbackHandler );
      AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
    }
  }

  // Free the user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the shared device-side conversion buffer, if allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  // NOTE(review): handle is dereferenced here without a NULL check;
  // presumably apiHandle is always set for an open stream — confirm.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
1539
//! Start the stream: begin CoreAudio callback processing on the
//! output device (index 0) and, for duplex streams on two distinct
//! devices, on the input device (index 1) as well.
void RtApiCore :: startStream( void )
{
  verifyStream(); // throws if no stream is open
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiCore::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStart( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[0], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // A duplex stream on a single device needs only the one start above.
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStart( handle->id[1], handle->procId[1] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[1], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Reset the drain state used by stopStream()/abortStream().
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1592
stopStream(void)1593 void RtApiCore :: stopStream( void )
1594 {
1595 verifyStream();
1596 if ( stream_.state == STREAM_STOPPED ) {
1597 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1598 error( RtAudioError::WARNING );
1599 return;
1600 }
1601
1602 OSStatus result = noErr;
1603 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1604 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1605
1606 if ( handle->drainCounter == 0 ) {
1607 handle->drainCounter = 2;
1608 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1609 }
1610
1611 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1612 result = AudioDeviceStop( handle->id[0], handle->procId[0] );
1613 #else // deprecated behaviour
1614 result = AudioDeviceStop( handle->id[0], callbackHandler );
1615 #endif
1616 if ( result != noErr ) {
1617 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1618 errorText_ = errorStream_.str();
1619 goto unlock;
1620 }
1621 }
1622
1623 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1624
1625 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1626 result = AudioDeviceStop( handle->id[0], handle->procId[1] );
1627 #else // deprecated behaviour
1628 result = AudioDeviceStop( handle->id[1], callbackHandler );
1629 #endif
1630 if ( result != noErr ) {
1631 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1632 errorText_ = errorStream_.str();
1633 goto unlock;
1634 }
1635 }
1636
1637 stream_.state = STREAM_STOPPED;
1638
1639 unlock:
1640 if ( result == noErr ) return;
1641 error( RtAudioError::SYSTEM_ERROR );
1642 }
1643
abortStream(void)1644 void RtApiCore :: abortStream( void )
1645 {
1646 verifyStream();
1647 if ( stream_.state == STREAM_STOPPED ) {
1648 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1649 error( RtAudioError::WARNING );
1650 return;
1651 }
1652
1653 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1654 handle->drainCounter = 2;
1655
1656 stopStream();
1657 }
1658
1659 // This function will be called by a spawned thread when the user
1660 // callback function signals that the stream should be stopped or
1661 // aborted. It is better to handle it this way because the
1662 // callbackEvent() function probably should return before the AudioDeviceStop()
1663 // function is called.
coreStopStream(void * ptr)1664 static void *coreStopStream( void *ptr )
1665 {
1666 CallbackInfo *info = (CallbackInfo *) ptr;
1667 RtApiCore *object = (RtApiCore *) info->object;
1668
1669 object->stopStream();
1670 pthread_exit( NULL );
1671 }
1672
// Per-buffer processing routine, invoked from the CoreAudio IOProc for
// each device participating in the stream.  It (1) handles drain
// completion signaling, (2) runs the user callback to produce/consume
// one buffer of audio, and (3) copies/converts data between the user
// buffers and the CoreAudio buffer lists, handling interleaved,
// non-interleaved (mono-mode) and multi-stream device layouts.
// Returns SUCCESS normally, FAILURE if called on a closed stream.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // drainCounter is incremented once per callback while draining (see
  // below); > 3 means enough silent buffers have been emitted.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was requested by the user callback's return value: stop
      // the stream from a separate thread (coreStopStream) so this
      // callback can return first.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any over/underrun flags recorded by the HAL
    // listener since the last callback.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return value 2 => abort immediately; 1 => drain
    // remaining output, then stop (internalDrain).
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One single-channel device stream per user channel: copy each
        // channel's contiguous block into its own stream buffer.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset is the distance (in samples) between consecutive
        // channels of the same frame in the source buffer.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Frame-by-frame copy of this stream's slice of channels.
          // (Note: this loop variable shadows the outer stream index.)
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // Declared before assignment so the goto above does not jump over an
  // initialization.
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        // Mirror image of the output de-interleaving logic above.
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // (Note: this loop variable shadows the outer stream index.)
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1935
getErrorCode(OSStatus code)1936 const char* RtApiCore :: getErrorCode( OSStatus code )
1937 {
1938 switch( code ) {
1939
1940 case kAudioHardwareNotRunningError:
1941 return "kAudioHardwareNotRunningError";
1942
1943 case kAudioHardwareUnspecifiedError:
1944 return "kAudioHardwareUnspecifiedError";
1945
1946 case kAudioHardwareUnknownPropertyError:
1947 return "kAudioHardwareUnknownPropertyError";
1948
1949 case kAudioHardwareBadPropertySizeError:
1950 return "kAudioHardwareBadPropertySizeError";
1951
1952 case kAudioHardwareIllegalOperationError:
1953 return "kAudioHardwareIllegalOperationError";
1954
1955 case kAudioHardwareBadObjectError:
1956 return "kAudioHardwareBadObjectError";
1957
1958 case kAudioHardwareBadDeviceError:
1959 return "kAudioHardwareBadDeviceError";
1960
1961 case kAudioHardwareBadStreamError:
1962 return "kAudioHardwareBadStreamError";
1963
1964 case kAudioHardwareUnsupportedOperationError:
1965 return "kAudioHardwareUnsupportedOperationError";
1966
1967 case kAudioDeviceUnsupportedFormatError:
1968 return "kAudioDeviceUnsupportedFormatError";
1969
1970 case kAudioDevicePermissionsError:
1971 return "kAudioDevicePermissionsError";
1972
1973 default:
1974 return "CoreAudio unknown error";
1975 }
1976 }
1977
1978 //******************** End of __MACOSX_CORE__ *********************//
1979 #endif
1980
1981 #if defined(__UNIX_JACK__)
1982
1983 // JACK is a low-latency audio server, originally written for the
1984 // GNU/Linux operating system and now also ported to OS-X. It can
1985 // connect a number of different applications to an audio device, as
1986 // well as allowing them to share audio between themselves.
1987 //
1988 // When using JACK with RtAudio, "devices" refer to JACK clients that
1989 // have ports connected to the server. The JACK server is typically
1990 // started in a terminal as follows:
1991 //
1992 // .jackd -d alsa -d hw:0
1993 //
1994 // or through an interface program such as qjackctl. Many of the
1995 // parameters normally set for a stream are fixed by the JACK server
1996 // and can be specified when the JACK server is started. In
1997 // particular,
1998 //
1999 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2000 //
2001 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2002 // frames, and number of buffers = 4. Once the server is running, it
2003 // is not possible to override these values. If the values are not
2004 // specified in the command-line, the JACK server uses default values.
2005 //
2006 // The JACK server does not have to be running when an instance of
2007 // RtApiJack is created, though the function getDeviceCount() will
2008 // report 0 devices found until JACK has been started. When no
2009 // devices are available (i.e., the JACK server is not running), a
2010 // stream cannot be opened.
2011
2012 #include <jack/jack.h>
2013 #include <unistd.h>
2014 #include <cstdio>
2015
2016 // A structure to hold various information related to the Jack API
2017 // implementation.
2018 struct JackHandle {
2019 jack_client_t *client;
2020 jack_port_t **ports[2];
2021 std::string deviceName[2];
2022 bool xrun[2];
2023 pthread_cond_t condition;
2024 int drainCounter; // Tracks callback counts when draining
2025 bool internalDrain; // Indicates if stop is initiated from callback or not.
2026
JackHandleJackHandle2027 JackHandle()
2028 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2029 };
2030
#if defined(__UNIX_JACK__)
// Expose the stream's underlying jack_client_t pointer (escape hatch
// for applications that need direct access to the JACK client).
void* RtAudio :: HACK__getJackClient()
{
  JackHandle *handle = static_cast<JackHandle*>( rtapi_->stream_.apiHandle );
  return handle->client;
}
#endif
2037
#if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler used to silence the server's internal error
// reporting (installed with jack_set_error_function).
// Fix: removed the stray semicolon after the function body, which was
// an empty declaration (warns under -Wextra-semi / -Wpedantic).
static void jackSilentError( const char * ) {}
#endif
2041
// Constructor: streams default to auto-connecting their ports; in
// non-debug builds JACK's own error printing is suppressed.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2050
~RtApiJack()2051 RtApiJack :: ~RtApiJack()
2052 {
2053 if ( stream_.state != STREAM_CLOSED ) closeStream();
2054 }
2055
getDeviceCount(void)2056 unsigned int RtApiJack :: getDeviceCount( void )
2057 {
2058 // See if we can become a jack client.
2059 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2060 jack_status_t *status = NULL;
2061 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2062 if ( client == 0 ) return 0;
2063
2064 const char **ports;
2065 std::string port, previousPort;
2066 unsigned int nChannels = 0, nDevices = 0;
2067 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2068 if ( ports ) {
2069 // Parse the port names up to the first colon (:).
2070 size_t iColon = 0;
2071 do {
2072 port = (char *) ports[ nChannels ];
2073 iColon = port.find(":");
2074 if ( iColon != std::string::npos ) {
2075 port = port.substr( 0, iColon + 1 );
2076 if ( port != previousPort ) {
2077 nDevices++;
2078 previousPort = port;
2079 }
2080 }
2081 } while ( ports[++nChannels] );
2082 free( ports );
2083 }
2084
2085 jack_client_close( client );
2086 return nDevices;
2087 }
2088
getDeviceInfo(unsigned int device)2089 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2090 {
2091 RtAudio::DeviceInfo info;
2092 info.probed = false;
2093
2094 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2095 jack_status_t *status = NULL;
2096 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2097 if ( client == 0 ) {
2098 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2099 error( RtAudioError::WARNING );
2100 return info;
2101 }
2102
2103 const char **ports;
2104 std::string port, previousPort;
2105 unsigned int nPorts = 0, nDevices = 0;
2106 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2107 if ( ports ) {
2108 // Parse the port names up to the first colon (:).
2109 size_t iColon = 0;
2110 do {
2111 port = (char *) ports[ nPorts ];
2112 iColon = port.find(":");
2113 if ( iColon != std::string::npos ) {
2114 port = port.substr( 0, iColon );
2115 if ( port != previousPort ) {
2116 if ( nDevices == device ) info.name = port;
2117 nDevices++;
2118 previousPort = port;
2119 }
2120 }
2121 } while ( ports[++nPorts] );
2122 free( ports );
2123 }
2124
2125 if ( device >= nDevices ) {
2126 jack_client_close( client );
2127 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2128 error( RtAudioError::INVALID_USE );
2129 return info;
2130 }
2131
2132 // Get the current jack server sample rate.
2133 info.sampleRates.clear();
2134
2135 info.preferredSampleRate = jack_get_sample_rate( client );
2136 info.sampleRates.push_back( info.preferredSampleRate );
2137
2138 // Count the available ports containing the client name as device
2139 // channels. Jack "input ports" equal RtAudio output channels.
2140 unsigned int nChannels = 0;
2141 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2142 if ( ports ) {
2143 while ( ports[ nChannels ] ) nChannels++;
2144 free( ports );
2145 info.outputChannels = nChannels;
2146 }
2147
2148 // Jack "output ports" equal RtAudio input channels.
2149 nChannels = 0;
2150 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2151 if ( ports ) {
2152 while ( ports[ nChannels ] ) nChannels++;
2153 free( ports );
2154 info.inputChannels = nChannels;
2155 }
2156
2157 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2158 jack_client_close(client);
2159 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2160 error( RtAudioError::WARNING );
2161 return info;
2162 }
2163
2164 // If device opens for both playback and capture, we determine the channels.
2165 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2166 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2167
2168 // Jack always uses 32-bit floats.
2169 info.nativeFormats = RTAUDIO_FLOAT32;
2170
2171 // Jack doesn't provide default devices so we'll use the first available one.
2172 if ( device == 0 && info.outputChannels > 0 )
2173 info.isDefaultOutput = true;
2174 if ( device == 0 && info.inputChannels > 0 )
2175 info.isDefaultInput = true;
2176
2177 jack_client_close(client);
2178 info.probed = true;
2179 return info;
2180 }
2181
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2182 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2183 {
2184 CallbackInfo *info = (CallbackInfo *) infoPointer;
2185
2186 RtApiJack *object = (RtApiJack *) info->object;
2187 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2188
2189 return 0;
2190 }
2191
2192 // This function will be called by a spawned thread when the Jack
2193 // server signals that it is shutting down. It is necessary to handle
2194 // it this way because the jackShutdown() function must return before
2195 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2196 static void *jackCloseStream( void *ptr )
2197 {
2198 CallbackInfo *info = (CallbackInfo *) ptr;
2199 RtApiJack *object = (RtApiJack *) info->object;
2200
2201 object->closeStream();
2202
2203 pthread_exit( NULL );
2204 }
jackShutdown(void * infoPointer)2205 static void jackShutdown( void *infoPointer )
2206 {
2207 CallbackInfo *info = (CallbackInfo *) infoPointer;
2208 RtApiJack *object = (RtApiJack *) info->object;
2209
2210 // Check current stream state. If stopped, then we'll assume this
2211 // was called as a result of a call to RtApiJack::stopStream (the
2212 // deactivation of a client handle causes this function to be called).
2213 // If not, we'll assume the Jack server is shutting down or some
2214 // other problem occurred and we should close the stream.
2215 if ( object->isStreamRunning() == false ) return;
2216
2217 ThreadHandle threadId;
2218 pthread_create( &threadId, NULL, jackCloseStream, info );
2219 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2220 }
2221
jackXrun(void * infoPointer)2222 static int jackXrun( void *infoPointer )
2223 {
2224 JackHandle *handle = *((JackHandle **) infoPointer);
2225
2226 if ( handle->ports[0] ) handle->xrun[0] = true;
2227 if ( handle->ports[1] ) handle->xrun[1] = true;
2228
2229 return 0;
2230 }
2231
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2232 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2233 unsigned int firstChannel, unsigned int sampleRate,
2234 RtAudioFormat format, unsigned int *bufferSize,
2235 RtAudio::StreamOptions *options )
2236 {
2237 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2238
2239 // Look for jack server and try to become a client (only do once per stream).
2240 jack_client_t *client = 0;
2241 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2242 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2243 jack_status_t *status = NULL;
2244 if ( options && !options->streamName.empty() )
2245 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2246 else
2247 client = jack_client_open( "RtApiJack", jackoptions, status );
2248 if ( client == 0 ) {
2249 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2250 error( RtAudioError::WARNING );
2251 return FAILURE;
2252 }
2253 }
2254 else {
2255 // The handle must have been created on an earlier pass.
2256 client = handle->client;
2257 }
2258
2259 const char **ports;
2260 std::string port, previousPort, deviceName;
2261 unsigned int nPorts = 0, nDevices = 0;
2262 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2263 if ( ports ) {
2264 // Parse the port names up to the first colon (:).
2265 size_t iColon = 0;
2266 do {
2267 port = (char *) ports[ nPorts ];
2268 iColon = port.find(":");
2269 if ( iColon != std::string::npos ) {
2270 port = port.substr( 0, iColon );
2271 if ( port != previousPort ) {
2272 if ( nDevices == device ) deviceName = port;
2273 nDevices++;
2274 previousPort = port;
2275 }
2276 }
2277 } while ( ports[++nPorts] );
2278 free( ports );
2279 }
2280
2281 if ( device >= nDevices ) {
2282 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2283 return FAILURE;
2284 }
2285
2286 unsigned long flag = JackPortIsInput;
2287 if ( mode == INPUT ) flag = JackPortIsOutput;
2288
2289 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2290 // Count the available ports containing the client name as device
2291 // channels. Jack "input ports" equal RtAudio output channels.
2292 unsigned int nChannels = 0;
2293 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2294 if ( ports ) {
2295 while ( ports[ nChannels ] ) nChannels++;
2296 free( ports );
2297 }
2298 // Compare the jack ports for specified client to the requested number of channels.
2299 if ( nChannels < (channels + firstChannel) ) {
2300 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2301 errorText_ = errorStream_.str();
2302 return FAILURE;
2303 }
2304 }
2305
2306 // Check the jack server sample rate.
2307 unsigned int jackRate = jack_get_sample_rate( client );
2308 if ( sampleRate != jackRate ) {
2309 jack_client_close( client );
2310 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2311 errorText_ = errorStream_.str();
2312 return FAILURE;
2313 }
2314 stream_.sampleRate = jackRate;
2315
2316 // Get the latency of the JACK port.
2317 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2318 if ( ports[ firstChannel ] ) {
2319 // Added by Ge Wang
2320 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2321 // the range (usually the min and max are equal)
2322 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2323 // get the latency range
2324 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2325 // be optimistic, use the min!
2326 stream_.latency[mode] = latrange.min;
2327 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2328 }
2329 free( ports );
2330
2331 // The jack server always uses 32-bit floating-point data.
2332 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2333 stream_.userFormat = format;
2334
2335 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2336 else stream_.userInterleaved = true;
2337
2338 // Jack always uses non-interleaved buffers.
2339 stream_.deviceInterleaved[mode] = false;
2340
2341 // Jack always provides host byte-ordered data.
2342 stream_.doByteSwap[mode] = false;
2343
2344 // Get the buffer size. The buffer size and number of buffers
2345 // (periods) is set when the jack server is started.
2346 stream_.bufferSize = (int) jack_get_buffer_size( client );
2347 *bufferSize = stream_.bufferSize;
2348
2349 stream_.nDeviceChannels[mode] = channels;
2350 stream_.nUserChannels[mode] = channels;
2351
2352 // Set flags for buffer conversion.
2353 stream_.doConvertBuffer[mode] = false;
2354 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2355 stream_.doConvertBuffer[mode] = true;
2356 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2357 stream_.nUserChannels[mode] > 1 )
2358 stream_.doConvertBuffer[mode] = true;
2359
2360 // Allocate our JackHandle structure for the stream.
2361 if ( handle == 0 ) {
2362 try {
2363 handle = new JackHandle;
2364 }
2365 catch ( std::bad_alloc& ) {
2366 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2367 goto error;
2368 }
2369
2370 if ( pthread_cond_init(&handle->condition, NULL) ) {
2371 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2372 goto error;
2373 }
2374 stream_.apiHandle = (void *) handle;
2375 handle->client = client;
2376 }
2377 handle->deviceName[mode] = deviceName;
2378
2379 // Allocate necessary internal buffers.
2380 unsigned long bufferBytes;
2381 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2382 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2383 if ( stream_.userBuffer[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2385 goto error;
2386 }
2387
2388 if ( stream_.doConvertBuffer[mode] ) {
2389
2390 bool makeBuffer = true;
2391 if ( mode == OUTPUT )
2392 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2393 else { // mode == INPUT
2394 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2397 if ( bufferBytes < bytesOut ) makeBuffer = false;
2398 }
2399 }
2400
2401 if ( makeBuffer ) {
2402 bufferBytes *= *bufferSize;
2403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2405 if ( stream_.deviceBuffer == NULL ) {
2406 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2407 goto error;
2408 }
2409 }
2410 }
2411
2412 // Allocate memory for the Jack ports (channels) identifiers.
2413 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2414 if ( handle->ports[mode] == NULL ) {
2415 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2416 goto error;
2417 }
2418
2419 stream_.device[mode] = device;
2420 stream_.channelOffset[mode] = firstChannel;
2421 stream_.state = STREAM_STOPPED;
2422 stream_.callbackInfo.object = (void *) this;
2423
2424 if ( stream_.mode == OUTPUT && mode == INPUT )
2425 // We had already set up the stream for output.
2426 stream_.mode = DUPLEX;
2427 else {
2428 stream_.mode = mode;
2429 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2430 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2431 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2432 }
2433
2434 // Register our ports.
2435 char label[64];
2436 if ( mode == OUTPUT ) {
2437 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2438 snprintf( label, 64, "outport %d", i );
2439 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2440 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2441 }
2442 }
2443 else {
2444 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2445 snprintf( label, 64, "inport %d", i );
2446 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2447 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2448 }
2449 }
2450
2451 // Setup the buffer conversion information structure. We don't use
2452 // buffers to do channel offsets, so we override that parameter
2453 // here.
2454 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2455
2456 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2457
2458 return SUCCESS;
2459
2460 error:
2461 if ( handle ) {
2462 pthread_cond_destroy( &handle->condition );
2463 jack_client_close( handle->client );
2464
2465 if ( handle->ports[0] ) free( handle->ports[0] );
2466 if ( handle->ports[1] ) free( handle->ports[1] );
2467
2468 delete handle;
2469 stream_.apiHandle = 0;
2470 }
2471
2472 for ( int i=0; i<2; i++ ) {
2473 if ( stream_.userBuffer[i] ) {
2474 free( stream_.userBuffer[i] );
2475 stream_.userBuffer[i] = 0;
2476 }
2477 }
2478
2479 if ( stream_.deviceBuffer ) {
2480 free( stream_.deviceBuffer );
2481 stream_.deviceBuffer = 0;
2482 }
2483
2484 return FAILURE;
2485 }
2486
closeStream(void)2487 void RtApiJack :: closeStream( void )
2488 {
2489 if ( stream_.state == STREAM_CLOSED ) {
2490 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2491 error( RtAudioError::WARNING );
2492 return;
2493 }
2494
2495 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2496 if ( handle ) {
2497
2498 if ( stream_.state == STREAM_RUNNING )
2499 jack_deactivate( handle->client );
2500
2501 jack_client_close( handle->client );
2502 }
2503
2504 if ( handle ) {
2505 if ( handle->ports[0] ) free( handle->ports[0] );
2506 if ( handle->ports[1] ) free( handle->ports[1] );
2507 pthread_cond_destroy( &handle->condition );
2508 delete handle;
2509 stream_.apiHandle = 0;
2510 }
2511
2512 for ( int i=0; i<2; i++ ) {
2513 if ( stream_.userBuffer[i] ) {
2514 free( stream_.userBuffer[i] );
2515 stream_.userBuffer[i] = 0;
2516 }
2517 }
2518
2519 if ( stream_.deviceBuffer ) {
2520 free( stream_.deviceBuffer );
2521 stream_.deviceBuffer = 0;
2522 }
2523
2524 stream_.mode = UNINITIALIZED;
2525 stream_.state = STREAM_CLOSED;
2526 }
2527
// Activate the JACK client and, unless autoconnection was disabled via
// the RTAUDIO_JACK_DONT_CONNECT stream-option flag, connect our
// registered ports to the device's physical ports.  Failures are
// reported through error() with SYSTEM_ERROR severity.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    result = 1; // assume failure until every connection below succeeds
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1; // a NULL port slot (offset past available ports) also counts as failure
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports ); // jack_get_ports() results must be freed by the caller
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2605
stopStream(void)2606 void RtApiJack :: stopStream( void )
2607 {
2608 verifyStream();
2609 if ( stream_.state == STREAM_STOPPED ) {
2610 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2611 error( RtAudioError::WARNING );
2612 return;
2613 }
2614
2615 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2616 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2617
2618 if ( handle->drainCounter == 0 ) {
2619 handle->drainCounter = 2;
2620 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2621 }
2622 }
2623
2624 jack_deactivate( handle->client );
2625 stream_.state = STREAM_STOPPED;
2626 }
2627
abortStream(void)2628 void RtApiJack :: abortStream( void )
2629 {
2630 verifyStream();
2631 if ( stream_.state == STREAM_STOPPED ) {
2632 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2633 error( RtAudioError::WARNING );
2634 return;
2635 }
2636
2637 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2638 handle->drainCounter = 2;
2639
2640 stopStream();
2641 }
2642
2643 // This function will be called by a spawned thread when the user
2644 // callback function signals that the stream should be stopped or
2645 // aborted. It is necessary to handle it this way because the
2646 // callbackEvent() function must return before the jack_deactivate()
2647 // function will return.
jackStopStream(void * ptr)2648 static void *jackStopStream( void *ptr )
2649 {
2650 CallbackInfo *info = (CallbackInfo *) ptr;
2651 RtApiJack *object = (RtApiJack *) info->object;
2652
2653 object->stopStream();
2654 pthread_exit( NULL );
2655 }
2656
callbackEvent(unsigned long nframes)2657 bool RtApiJack :: callbackEvent( unsigned long nframes )
2658 {
2659 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2660 if ( stream_.state == STREAM_CLOSED ) {
2661 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2662 error( RtAudioError::WARNING );
2663 return FAILURE;
2664 }
2665 if ( stream_.bufferSize != nframes ) {
2666 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2667 error( RtAudioError::WARNING );
2668 return FAILURE;
2669 }
2670
2671 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2672 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2673
2674 // Check if we were draining the stream and signal is finished.
2675 if ( handle->drainCounter > 3 ) {
2676 ThreadHandle threadId;
2677
2678 stream_.state = STREAM_STOPPING;
2679 if ( handle->internalDrain == true )
2680 pthread_create( &threadId, NULL, jackStopStream, info );
2681 else
2682 pthread_cond_signal( &handle->condition );
2683 return SUCCESS;
2684 }
2685
2686 // Invoke user callback first, to get fresh output data.
2687 if ( handle->drainCounter == 0 ) {
2688 RtAudioCallback callback = (RtAudioCallback) info->callback;
2689 double streamTime = getStreamTime();
2690 RtAudioStreamStatus status = 0;
2691 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2692 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2693 handle->xrun[0] = false;
2694 }
2695 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2696 status |= RTAUDIO_INPUT_OVERFLOW;
2697 handle->xrun[1] = false;
2698 }
2699 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2700 stream_.bufferSize, streamTime, status, info->userData );
2701 if ( cbReturnValue == 2 ) {
2702 stream_.state = STREAM_STOPPING;
2703 handle->drainCounter = 2;
2704 ThreadHandle id;
2705 pthread_create( &id, NULL, jackStopStream, info );
2706 return SUCCESS;
2707 }
2708 else if ( cbReturnValue == 1 ) {
2709 handle->drainCounter = 1;
2710 handle->internalDrain = true;
2711 }
2712 }
2713
2714 jack_default_audio_sample_t *jackbuffer;
2715 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2716 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2717
2718 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2719
2720 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2721 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2722 memset( jackbuffer, 0, bufferBytes );
2723 }
2724
2725 }
2726 else if ( stream_.doConvertBuffer[0] ) {
2727
2728 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2729
2730 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2731 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2732 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2733 }
2734 }
2735 else { // no buffer conversion
2736 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2737 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2738 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2739 }
2740 }
2741 }
2742
2743 // Don't bother draining input
2744 if ( handle->drainCounter ) {
2745 handle->drainCounter++;
2746 goto unlock;
2747 }
2748
2749 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2750
2751 if ( stream_.doConvertBuffer[1] ) {
2752 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2755 }
2756 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2757 }
2758 else { // no buffer conversion
2759 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2760 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2761 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2762 }
2763 }
2764 }
2765
2766 unlock:
2767 RtApi::tickStreamTime();
2768 return SUCCESS;
2769 }
2770 //******************** End of __UNIX_JACK__ *********************//
2771 #endif
2772
2773 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2774
2775 // The ASIO API is designed around a callback scheme, so this
2776 // implementation is similar to that used for OS-X CoreAudio and Linux
2777 // Jack. The primary constraint with ASIO is that it only allows
2778 // access to a single driver at a time. Thus, it is not possible to
2779 // have more than one simultaneous RtAudio stream.
2780 //
2781 // This implementation also requires a number of external ASIO files
2782 // and a few global variables. The ASIO callback scheme does not
2783 // allow for the passing of user data, so we must create a global
2784 // pointer to our callbackInfo structure.
2785 //
2786 // On unix systems, we make use of a pthread condition variable.
2787 // Since there is no equivalent in Windows, I hacked something based
2788 // on information found in
2789 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2790
2791 #include "asiosys.h"
2792 #include "asio.h"
2793 #include "iasiothiscallresolver.h"
2794 #include "asiodrivers.h"
2795 #include <cmath>
2796
2797 static AsioDrivers drivers;
2798 static ASIOCallbacks asioCallbacks;
2799 static ASIODriverInfo driverInfo;
2800 static CallbackInfo *asioCallbackInfo;
2801 static bool asioXRun;
2802
2803 struct AsioHandle {
2804 int drainCounter; // Tracks callback counts when draining
2805 bool internalDrain; // Indicates if stop is initiated from callback or not.
2806 ASIOBufferInfo *bufferInfos;
2807 HANDLE condition;
2808
AsioHandleAsioHandle2809 AsioHandle()
2810 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2811 };
2812
2813 // Function declarations (definitions at end of section)
2814 static const char* getAsioErrorString( ASIOError result );
2815 static void sampleRateChanged( ASIOSampleRate sRate );
2816 static long asioMessages( long selector, long value, void* message, double* opt );
2817
RtApiAsio()2818 RtApiAsio :: RtApiAsio()
2819 {
2820 // ASIO cannot run on a multi-threaded appartment. You can call
2821 // CoInitialize beforehand, but it must be for appartment threading
2822 // (in which case, CoInitilialize will return S_FALSE here).
2823 coInitialized_ = false;
2824 HRESULT hr = CoInitialize( NULL );
2825 if ( FAILED(hr) ) {
2826 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2827 error( RtAudioError::WARNING );
2828 }
2829 coInitialized_ = true;
2830
2831 drivers.removeCurrentDriver();
2832 driverInfo.asioVersion = 2;
2833
2834 // See note in DirectSound implementation about GetDesktopWindow().
2835 driverInfo.sysRef = GetForegroundWindow();
2836 }
2837
~RtApiAsio()2838 RtApiAsio :: ~RtApiAsio()
2839 {
2840 if ( stream_.state != STREAM_CLOSED ) closeStream();
2841 if ( coInitialized_ ) CoUninitialize();
2842 }
2843
getDeviceCount(void)2844 unsigned int RtApiAsio :: getDeviceCount( void )
2845 {
2846 return (unsigned int) drivers.asioGetNumDev();
2847 }
2848
getDeviceInfo(unsigned int device)2849 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2850 {
2851 RtAudio::DeviceInfo info;
2852 info.probed = false;
2853
2854 // Get device ID
2855 unsigned int nDevices = getDeviceCount();
2856 if ( nDevices == 0 ) {
2857 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2858 error( RtAudioError::INVALID_USE );
2859 return info;
2860 }
2861
2862 if ( device >= nDevices ) {
2863 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2864 error( RtAudioError::INVALID_USE );
2865 return info;
2866 }
2867
2868 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2869 if ( stream_.state != STREAM_CLOSED ) {
2870 if ( device >= devices_.size() ) {
2871 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2872 error( RtAudioError::WARNING );
2873 return info;
2874 }
2875 return devices_[ device ];
2876 }
2877
2878 char driverName[32];
2879 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2880 if ( result != ASE_OK ) {
2881 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2882 errorText_ = errorStream_.str();
2883 error( RtAudioError::WARNING );
2884 return info;
2885 }
2886
2887 info.name = driverName;
2888
2889 if ( !drivers.loadDriver( driverName ) ) {
2890 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2891 errorText_ = errorStream_.str();
2892 error( RtAudioError::WARNING );
2893 return info;
2894 }
2895
2896 result = ASIOInit( &driverInfo );
2897 if ( result != ASE_OK ) {
2898 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2899 errorText_ = errorStream_.str();
2900 error( RtAudioError::WARNING );
2901 return info;
2902 }
2903
2904 // Determine the device channel information.
2905 long inputChannels, outputChannels;
2906 result = ASIOGetChannels( &inputChannels, &outputChannels );
2907 if ( result != ASE_OK ) {
2908 drivers.removeCurrentDriver();
2909 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2910 errorText_ = errorStream_.str();
2911 error( RtAudioError::WARNING );
2912 return info;
2913 }
2914
2915 info.outputChannels = outputChannels;
2916 info.inputChannels = inputChannels;
2917 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2918 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2919
2920 // Determine the supported sample rates.
2921 info.sampleRates.clear();
2922 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2923 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2924 if ( result == ASE_OK ) {
2925 info.sampleRates.push_back( SAMPLE_RATES[i] );
2926
2927 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2928 info.preferredSampleRate = SAMPLE_RATES[i];
2929 }
2930 }
2931
2932 // Determine supported data types ... just check first channel and assume rest are the same.
2933 ASIOChannelInfo channelInfo;
2934 channelInfo.channel = 0;
2935 channelInfo.isInput = true;
2936 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2937 result = ASIOGetChannelInfo( &channelInfo );
2938 if ( result != ASE_OK ) {
2939 drivers.removeCurrentDriver();
2940 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2941 errorText_ = errorStream_.str();
2942 error( RtAudioError::WARNING );
2943 return info;
2944 }
2945
2946 info.nativeFormats = 0;
2947 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2948 info.nativeFormats |= RTAUDIO_SINT16;
2949 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2950 info.nativeFormats |= RTAUDIO_SINT32;
2951 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2952 info.nativeFormats |= RTAUDIO_FLOAT32;
2953 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2954 info.nativeFormats |= RTAUDIO_FLOAT64;
2955 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2956 info.nativeFormats |= RTAUDIO_SINT24;
2957
2958 if ( info.outputChannels > 0 )
2959 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2960 if ( info.inputChannels > 0 )
2961 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2962
2963 info.probed = true;
2964 drivers.removeCurrentDriver();
2965 return info;
2966 }
2967
bufferSwitch(long index,ASIOBool)2968 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2969 {
2970 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2971 object->callbackEvent( index );
2972 }
2973
saveDeviceInfo(void)2974 void RtApiAsio :: saveDeviceInfo( void )
2975 {
2976 devices_.clear();
2977
2978 unsigned int nDevices = getDeviceCount();
2979 devices_.resize( nDevices );
2980 for ( unsigned int i=0; i<nDevices; i++ )
2981 devices_[i] = getDeviceInfo( i );
2982 }
2983
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2984 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2985 unsigned int firstChannel, unsigned int sampleRate,
2986 RtAudioFormat format, unsigned int *bufferSize,
2987 RtAudio::StreamOptions *options )
{
2989
2990 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2991
2992 // For ASIO, a duplex stream MUST use the same driver.
2993 if ( isDuplexInput && stream_.device[0] != device ) {
2994 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2995 return FAILURE;
2996 }
2997
2998 char driverName[32];
2999 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3000 if ( result != ASE_OK ) {
3001 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3002 errorText_ = errorStream_.str();
3003 return FAILURE;
3004 }
3005
3006 // Only load the driver once for duplex stream.
3007 if ( !isDuplexInput ) {
3008 // The getDeviceInfo() function will not work when a stream is open
3009 // because ASIO does not allow multiple devices to run at the same
3010 // time. Thus, we'll probe the system before opening a stream and
3011 // save the results for use by getDeviceInfo().
3012 this->saveDeviceInfo();
3013
3014 if ( !drivers.loadDriver( driverName ) ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3016 errorText_ = errorStream_.str();
3017 return FAILURE;
3018 }
3019
3020 result = ASIOInit( &driverInfo );
3021 if ( result != ASE_OK ) {
3022 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3023 errorText_ = errorStream_.str();
3024 return FAILURE;
3025 }
3026 }
3027
3028 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3029 bool buffersAllocated = false;
3030 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3031 unsigned int nChannels;
3032
3033
3034 // Check the device channel count.
3035 long inputChannels, outputChannels;
3036 result = ASIOGetChannels( &inputChannels, &outputChannels );
3037 if ( result != ASE_OK ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3039 errorText_ = errorStream_.str();
3040 goto error;
3041 }
3042
3043 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3044 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3046 errorText_ = errorStream_.str();
3047 goto error;
3048 }
3049 stream_.nDeviceChannels[mode] = channels;
3050 stream_.nUserChannels[mode] = channels;
3051 stream_.channelOffset[mode] = firstChannel;
3052
3053 // Verify the sample rate is supported.
3054 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3057 errorText_ = errorStream_.str();
3058 goto error;
3059 }
3060
3061 // Get the current sample rate
3062 ASIOSampleRate currentRate;
  result = ASIOGetSampleRate( &currentRate );
3064 if ( result != ASE_OK ) {
3065 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3066 errorText_ = errorStream_.str();
3067 goto error;
3068 }
3069
3070 // Set the sample rate only if necessary
3071 if ( currentRate != sampleRate ) {
3072 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3073 if ( result != ASE_OK ) {
3074 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3075 errorText_ = errorStream_.str();
3076 goto error;
3077 }
3078 }
3079
3080 // Determine the driver data type.
3081 ASIOChannelInfo channelInfo;
3082 channelInfo.channel = 0;
3083 if ( mode == OUTPUT ) channelInfo.isInput = false;
3084 else channelInfo.isInput = true;
3085 result = ASIOGetChannelInfo( &channelInfo );
3086 if ( result != ASE_OK ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3088 errorText_ = errorStream_.str();
3089 goto error;
3090 }
3091
3092 // Assuming WINDOWS host is always little-endian.
3093 stream_.doByteSwap[mode] = false;
3094 stream_.userFormat = format;
3095 stream_.deviceFormat[mode] = 0;
3096 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3097 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3098 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3099 }
3100 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3101 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3102 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3103 }
3104 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3105 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3106 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3107 }
3108 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3109 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3110 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3111 }
3112 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3113 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3114 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3115 }
3116
3117 if ( stream_.deviceFormat[mode] == 0 ) {
3118 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3119 errorText_ = errorStream_.str();
3120 goto error;
3121 }
3122
3123 // Set the buffer size. For a duplex stream, this will end up
3124 // setting the buffer size based on the input constraints, which
3125 // should be ok.
3126 long minSize, maxSize, preferSize, granularity;
3127 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3128 if ( result != ASE_OK ) {
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3130 errorText_ = errorStream_.str();
3131 goto error;
3132 }
3133
3134 if ( isDuplexInput ) {
3135 // When this is the duplex input (output was opened before), then we have to use the same
3136 // buffersize as the output, because it might use the preferred buffer size, which most
3137 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3138 // So instead of throwing an error, make them equal. The caller uses the reference
3139 // to the "bufferSize" param as usual to set up processing buffers.
3140
3141 *bufferSize = stream_.bufferSize;
3142
3143 } else {
3144 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3145 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3146 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3147 else if ( granularity == -1 ) {
3148 // Make sure bufferSize is a power of two.
3149 int log2_of_min_size = 0;
3150 int log2_of_max_size = 0;
3151
3152 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3153 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3154 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3155 }
3156
3157 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3158 int min_delta_num = log2_of_min_size;
3159
3160 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3161 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3162 if (current_delta < min_delta) {
3163 min_delta = current_delta;
3164 min_delta_num = i;
3165 }
3166 }
3167
3168 *bufferSize = ( (unsigned int)1 << min_delta_num );
3169 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3170 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3171 }
3172 else if ( granularity != 0 ) {
3173 // Set to an even multiple of granularity, rounding up.
3174 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3175 }
3176 }
3177
3178 /*
3179 // we don't use it anymore, see above!
3180 // Just left it here for the case...
3181 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3182 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3183 goto error;
3184 }
3185 */
3186
3187 stream_.bufferSize = *bufferSize;
3188 stream_.nBuffers = 2;
3189
3190 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3191 else stream_.userInterleaved = true;
3192
3193 // ASIO always uses non-interleaved buffers.
3194 stream_.deviceInterleaved[mode] = false;
3195
3196 // Allocate, if necessary, our AsioHandle structure for the stream.
3197 if ( handle == 0 ) {
3198 try {
3199 handle = new AsioHandle;
3200 }
3201 catch ( std::bad_alloc& ) {
3202 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3203 goto error;
3204 }
3205 handle->bufferInfos = 0;
3206
3207 // Create a manual-reset event.
3208 handle->condition = CreateEvent( NULL, // no security
3209 TRUE, // manual-reset
3210 FALSE, // non-signaled initially
3211 NULL ); // unnamed
3212 stream_.apiHandle = (void *) handle;
3213 }
3214
3215 // Create the ASIO internal buffers. Since RtAudio sets up input
3216 // and output separately, we'll have to dispose of previously
3217 // created output buffers for a duplex stream.
3218 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3219 ASIODisposeBuffers();
3220 if ( handle->bufferInfos ) free( handle->bufferInfos );
3221 }
3222
3223 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3224 unsigned int i;
3225 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3226 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3227 if ( handle->bufferInfos == NULL ) {
3228 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3229 errorText_ = errorStream_.str();
3230 goto error;
3231 }
3232
3233 ASIOBufferInfo *infos;
3234 infos = handle->bufferInfos;
3235 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3236 infos->isInput = ASIOFalse;
3237 infos->channelNum = i + stream_.channelOffset[0];
3238 infos->buffers[0] = infos->buffers[1] = 0;
3239 }
3240 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3241 infos->isInput = ASIOTrue;
3242 infos->channelNum = i + stream_.channelOffset[1];
3243 infos->buffers[0] = infos->buffers[1] = 0;
3244 }
3245
3246 // prepare for callbacks
3247 stream_.sampleRate = sampleRate;
3248 stream_.device[mode] = device;
3249 stream_.mode = isDuplexInput ? DUPLEX : mode;
3250
3251 // store this class instance before registering callbacks, that are going to use it
3252 asioCallbackInfo = &stream_.callbackInfo;
3253 stream_.callbackInfo.object = (void *) this;
3254
3255 // Set up the ASIO callback structure and create the ASIO data buffers.
3256 asioCallbacks.bufferSwitch = &bufferSwitch;
3257 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3258 asioCallbacks.asioMessage = &asioMessages;
3259 asioCallbacks.bufferSwitchTimeInfo = NULL;
3260 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3261 if ( result != ASE_OK ) {
3262 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3263 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3264 // In that case, let's be naïve and try that instead.
3265 *bufferSize = preferSize;
3266 stream_.bufferSize = *bufferSize;
3267 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3268 }
3269
3270 if ( result != ASE_OK ) {
3271 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3272 errorText_ = errorStream_.str();
3273 goto error;
3274 }
3275 buffersAllocated = true;
3276 stream_.state = STREAM_STOPPED;
3277
3278 // Set flags for buffer conversion.
3279 stream_.doConvertBuffer[mode] = false;
3280 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3281 stream_.doConvertBuffer[mode] = true;
3282 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3283 stream_.nUserChannels[mode] > 1 )
3284 stream_.doConvertBuffer[mode] = true;
3285
3286 // Allocate necessary internal buffers
3287 unsigned long bufferBytes;
3288 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3289 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3290 if ( stream_.userBuffer[mode] == NULL ) {
3291 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3292 goto error;
3293 }
3294
3295 if ( stream_.doConvertBuffer[mode] ) {
3296
3297 bool makeBuffer = true;
3298 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3299 if ( isDuplexInput && stream_.deviceBuffer ) {
3300 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3301 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3302 }
3303
3304 if ( makeBuffer ) {
3305 bufferBytes *= *bufferSize;
3306 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3308 if ( stream_.deviceBuffer == NULL ) {
3309 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3310 goto error;
3311 }
3312 }
3313 }
3314
3315 // Determine device latencies
3316 long inputLatency, outputLatency;
3317 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3318 if ( result != ASE_OK ) {
3319 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3320 errorText_ = errorStream_.str();
3321 error( RtAudioError::WARNING); // warn but don't fail
3322 }
3323 else {
3324 stream_.latency[0] = outputLatency;
3325 stream_.latency[1] = inputLatency;
3326 }
3327
3328 // Setup the buffer conversion information structure. We don't use
3329 // buffers to do channel offsets, so we override that parameter
3330 // here.
3331 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3332
3333 return SUCCESS;
3334
3335 error:
3336 if ( !isDuplexInput ) {
3337 // the cleanup for error in the duplex input, is done by RtApi::openStream
3338 // So we clean up for single channel only
3339
3340 if ( buffersAllocated )
3341 ASIODisposeBuffers();
3342
3343 drivers.removeCurrentDriver();
3344
3345 if ( handle ) {
3346 CloseHandle( handle->condition );
3347 if ( handle->bufferInfos )
3348 free( handle->bufferInfos );
3349
3350 delete handle;
3351 stream_.apiHandle = 0;
3352 }
3353
3354
3355 if ( stream_.userBuffer[mode] ) {
3356 free( stream_.userBuffer[mode] );
3357 stream_.userBuffer[mode] = 0;
3358 }
3359
3360 if ( stream_.deviceBuffer ) {
3361 free( stream_.deviceBuffer );
3362 stream_.deviceBuffer = 0;
3363 }
3364 }
3365
3366 return FAILURE;
3367 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3368
closeStream()3369 void RtApiAsio :: closeStream()
3370 {
3371 if ( stream_.state == STREAM_CLOSED ) {
3372 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3373 error( RtAudioError::WARNING );
3374 return;
3375 }
3376
3377 if ( stream_.state == STREAM_RUNNING ) {
3378 stream_.state = STREAM_STOPPED;
3379 ASIOStop();
3380 }
3381 ASIODisposeBuffers();
3382 drivers.removeCurrentDriver();
3383
3384 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3385 if ( handle ) {
3386 CloseHandle( handle->condition );
3387 if ( handle->bufferInfos )
3388 free( handle->bufferInfos );
3389 delete handle;
3390 stream_.apiHandle = 0;
3391 }
3392
3393 for ( int i=0; i<2; i++ ) {
3394 if ( stream_.userBuffer[i] ) {
3395 free( stream_.userBuffer[i] );
3396 stream_.userBuffer[i] = 0;
3397 }
3398 }
3399
3400 if ( stream_.deviceBuffer ) {
3401 free( stream_.deviceBuffer );
3402 stream_.deviceBuffer = 0;
3403 }
3404
3405 stream_.mode = UNINITIALIZED;
3406 stream_.state = STREAM_CLOSED;
3407 }
3408
// Guard flag shared with the ASIO stop machinery; reset to false by
// startStream().  NOTE(review): the code visible here only ever clears
// this flag -- confirm where it is set before relying on its meaning.
bool stopThreadCalled = false;
3410
startStream()3411 void RtApiAsio :: startStream()
3412 {
3413 verifyStream();
3414 if ( stream_.state == STREAM_RUNNING ) {
3415 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3416 error( RtAudioError::WARNING );
3417 return;
3418 }
3419
3420 #if defined( HAVE_GETTIMEOFDAY )
3421 gettimeofday( &stream_.lastTickTimestamp, NULL );
3422 #endif
3423
3424 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3425 ASIOError result = ASIOStart();
3426 if ( result != ASE_OK ) {
3427 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3428 errorText_ = errorStream_.str();
3429 goto unlock;
3430 }
3431
3432 handle->drainCounter = 0;
3433 handle->internalDrain = false;
3434 ResetEvent( handle->condition );
3435 stream_.state = STREAM_RUNNING;
3436 asioXRun = false;
3437
3438 unlock:
3439 stopThreadCalled = false;
3440
3441 if ( result == ASE_OK ) return;
3442 error( RtAudioError::SYSTEM_ERROR );
3443 }
3444
stopStream()3445 void RtApiAsio :: stopStream()
3446 {
3447 verifyStream();
3448 if ( stream_.state == STREAM_STOPPED ) {
3449 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3450 error( RtAudioError::WARNING );
3451 return;
3452 }
3453
3454 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3455 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3456 if ( handle->drainCounter == 0 ) {
3457 handle->drainCounter = 2;
3458 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3459 }
3460 }
3461
3462 stream_.state = STREAM_STOPPED;
3463
3464 ASIOError result = ASIOStop();
3465 if ( result != ASE_OK ) {
3466 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3467 errorText_ = errorStream_.str();
3468 }
3469
3470 if ( result == ASE_OK ) return;
3471 error( RtAudioError::SYSTEM_ERROR );
3472 }
3473
abortStream()3474 void RtApiAsio :: abortStream()
3475 {
3476 verifyStream();
3477 if ( stream_.state == STREAM_STOPPED ) {
3478 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3479 error( RtAudioError::WARNING );
3480 return;
3481 }
3482
3483 // The following lines were commented-out because some behavior was
3484 // noted where the device buffers need to be zeroed to avoid
3485 // continuing sound, even when the device buffers are completely
3486 // disposed. So now, calling abort is the same as calling stop.
3487 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 // handle->drainCounter = 2;
3489 stopStream();
3490 }
3491
3492 // This function will be called by a spawned thread when the user
3493 // callback function signals that the stream should be stopped or
3494 // aborted. It is necessary to handle it this way because the
3495 // callbackEvent() function must return before the ASIOStop()
3496 // function will return.
asioStopStream(void * ptr)3497 static unsigned __stdcall asioStopStream( void *ptr )
3498 {
3499 CallbackInfo *info = (CallbackInfo *) ptr;
3500 RtApiAsio *object = (RtApiAsio *) info->object;
3501
3502 object->stopStream();
3503 _endthreadex( 0 );
3504 return 0;
3505 }
3506
// Per-buffer ASIO processing, presumably entered from the driver's
// buffer-switch callback (asioCallbacks.bufferSwitch is registered above).
// Runs the user callback, moves audio between the host-side buffers and
// the driver's channel buffers for the half identified by bufferIndex,
// and manages the drain/stop handshake via handle->drainCounter.
// Returns SUCCESS normally, FAILURE only for the closed-stream case.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // Nothing to do once a stop is pending or complete.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter > 3 means the zero-filled buffers written below have had
  // time to reach the hardware, so the stream can now actually stop.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // An external stopStream() call is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // The drain was requested by the user callback's return value, so
      // nobody is waiting: stop from a separate thread (ASIOStop() cannot
      // complete until this callback returns).
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Translate a driver-reported xrun into the appropriate status flag
    // for whichever direction(s) this stream runs.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort: skip further draining and
      // stop from a helper thread.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a graceful stop: begin an internally-driven drain.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per device channel for one buffer of output.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data to the device format/layout, byte-swapping if
      // required, then de-interleave one channel per ASIO buffer.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy user data channel-by-channel, with an
      // optional in-place byte swap first.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    // Advance the drain state machine; the > 3 branch above fires once the
    // zeroed output has cycled through.
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Bytes per device channel for one buffer of input.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion needed: copy each input channel straight into the
      // user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3660
sampleRateChanged(ASIOSampleRate sRate)3661 static void sampleRateChanged( ASIOSampleRate sRate )
3662 {
3663 // The ASIO documentation says that this usually only happens during
3664 // external sync. Audio processing is not stopped by the driver,
3665 // actual sample rate might not have even changed, maybe only the
3666 // sample rate status of an AES/EBU or S/PDIF digital input at the
3667 // audio device.
3668
3669 RtApi *object = (RtApi *) asioCallbackInfo->object;
3670 try {
3671 object->stopStream();
3672 }
3673 catch ( RtAudioError &exception ) {
3674 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3675 return;
3676 }
3677
3678 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3679 }
3680
asioMessages(long selector,long value,void *,double *)3681 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3682 {
3683 long ret = 0;
3684
3685 switch( selector ) {
3686 case kAsioSelectorSupported:
3687 if ( value == kAsioResetRequest
3688 || value == kAsioEngineVersion
3689 || value == kAsioResyncRequest
3690 || value == kAsioLatenciesChanged
3691 // The following three were added for ASIO 2.0, you don't
3692 // necessarily have to support them.
3693 || value == kAsioSupportsTimeInfo
3694 || value == kAsioSupportsTimeCode
3695 || value == kAsioSupportsInputMonitor)
3696 ret = 1L;
3697 break;
3698 case kAsioResetRequest:
3699 // Defer the task and perform the reset of the driver during the
3700 // next "safe" situation. You cannot reset the driver right now,
3701 // as this code is called from the driver. Reset the driver is
3702 // done by completely destruct is. I.e. ASIOStop(),
3703 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3704 // driver again.
3705 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3706 ret = 1L;
3707 break;
3708 case kAsioResyncRequest:
3709 // This informs the application that the driver encountered some
3710 // non-fatal data loss. It is used for synchronization purposes
3711 // of different media. Added mainly to work around the Win16Mutex
3712 // problems in Windows 95/98 with the Windows Multimedia system,
3713 // which could lose data because the Mutex was held too long by
3714 // another thread. However a driver can issue it in other
3715 // situations, too.
3716 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3717 asioXRun = true;
3718 ret = 1L;
3719 break;
3720 case kAsioLatenciesChanged:
3721 // This will inform the host application that the drivers were
3722 // latencies changed. Beware, it this does not mean that the
3723 // buffer sizes have changed! You might need to update internal
3724 // delay data.
3725 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3726 ret = 1L;
3727 break;
3728 case kAsioEngineVersion:
3729 // Return the supported ASIO version of the host application. If
3730 // a host application does not implement this selector, ASIO 1.0
3731 // is assumed by the driver.
3732 ret = 2L;
3733 break;
3734 case kAsioSupportsTimeInfo:
3735 // Informs the driver whether the
3736 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3737 // For compatibility with ASIO 1.0 drivers the host application
3738 // should always support the "old" bufferSwitch method, too.
3739 ret = 0;
3740 break;
3741 case kAsioSupportsTimeCode:
3742 // Informs the driver whether application is interested in time
3743 // code info. If an application does not need to know about time
3744 // code, the driver has less work to do.
3745 ret = 0;
3746 break;
3747 }
3748 return ret;
3749 }
3750
getAsioErrorString(ASIOError result)3751 static const char* getAsioErrorString( ASIOError result )
3752 {
3753 struct Messages
3754 {
3755 ASIOError value;
3756 const char*message;
3757 };
3758
3759 static const Messages m[] =
3760 {
3761 { ASE_NotPresent, "Hardware input or output is not present or available." },
3762 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3763 { ASE_InvalidParameter, "Invalid input parameter." },
3764 { ASE_InvalidMode, "Invalid mode." },
3765 { ASE_SPNotAdvancing, "Sample position not advancing." },
3766 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3767 { ASE_NoMemory, "Not enough memory to complete the request." }
3768 };
3769
3770 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3771 if ( m[i].value == result ) return m[i].message;
3772
3773 return "Unknown error.";
3774 }
3775
3776 //******************** End of __WINDOWS_ASIO__ *********************//
3777 #endif
3778
3779
3780 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3781
3782 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3783 // - Introduces support for the Windows WASAPI API
3784 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3785 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3786 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3787
3788 #ifndef INITGUID
3789 #define INITGUID
3790 #endif
3791
3792 #include <mfapi.h>
3793 #include <mferror.h>
3794 #include <mfplay.h>
3795 #include <mftransform.h>
3796 #include <wmcodecdsp.h>
3797
3798 #include <audioclient.h>
3799 #include <avrt.h>
3800 #include <mmdeviceapi.h>
3801 #include <functiondiscoverykeys_devpkey.h>
3802
3803 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3804 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3805 #endif
3806
3807 #ifndef MFSTARTUP_NOSOCKET
3808 #define MFSTARTUP_NOSOCKET 0x1
3809 #endif
3810
3811 #ifdef _MSC_VER
3812 #pragma comment( lib, "ksuser" )
3813 #pragma comment( lib, "mfplat.lib" )
3814 #pragma comment( lib, "mfuuid.lib" )
3815 #pragma comment( lib, "wmcodecdspuuid" )
3816 #endif
3817
3818 //=============================================================================
3819
3820 #define SAFE_RELEASE( objectPtr )\
3821 if ( objectPtr )\
3822 {\
3823 objectPtr->Release();\
3824 objectPtr = NULL;\
3825 }
3826
3827 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3828
3829 //-----------------------------------------------------------------------------
3830
3831 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3832 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3833 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3834 // provide intermediate storage for read / write synchronization.
class WasapiBuffer
{
public:
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}

  // Releases the ring storage ( free( NULL ) is a safe no-op ).
  ~WasapiBuffer() {
    free( buffer_ );
  }

  // sets the length of the internal ring buffer
  // bufferSize is a count of samples and formatBytes the size of one
  // sample in bytes.  Any previous contents are discarded, the new
  // storage is zero-filled, and both indices are reset.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    free( buffer_ );

    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }

  // attempt to push a buffer into the ring buffer at the current "in" index
  // bufferSize is in samples of the given format.  Returns false (leaving
  // the ring untouched) when the input is NULL/empty, larger than the
  // ring, or there is not enough free space; true after a successful copy.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "out" index so the overlap test below can use plain
    // comparisons even when the write region wraps past the end of the ring.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // the "IN" index CAN BEGIN at the "OUT" index
    // the "IN" index CANNOT END at the "OUT" index
    if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromZeroSize = samples that wrap around to the start of the ring;
    // fromInSize = samples written contiguously at the "in" index.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // Indices count samples, so the storage is addressed as an array of
    // the format's own sample type for each case.
    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
    }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }

  // attempt to pull a buffer from the ring buffer from the current "out" index
  // bufferSize is in samples of the given format.  Returns false (leaving
  // the ring untouched) when the request is NULL/empty, larger than the
  // ring, or more data than is available; true after a successful copy.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index so the availability test below can use plain
    // comparisons even when the read region wraps past the end of the ring.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // the "OUT" index CANNOT BEGIN at the "IN" index
    // the "OUT" index CAN END at the "IN" index
    if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    // fromZeroSize = samples read from the start of the ring after a wrap;
    // fromOutSize = samples read contiguously from the "out" index.
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }

private:
  char* buffer_;            // ring storage, owned; allocated by setBufferSize()
  unsigned int bufferSize_; // ring capacity, in samples
  unsigned int inIndex_;    // next write position, in samples
  unsigned int outIndex_;   // next read position, in samples
};
3989
3990 //-----------------------------------------------------------------------------
3991
3992 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3993 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3994 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
class WasapiResampler
{
public:
  // Builds a Media Foundation resampler transform (CResamplerMediaObject)
  // configured for the given sample layout: isFloat selects the float vs
  // PCM subtype, bitsPerSample/channelCount describe one frame, and the
  // two rates define the conversion ratio.
  // NOTE(review): the HRESULTs from MFStartup / CoCreateInstance /
  // QueryInterface / the media-type calls are not checked; if creation
  // fails, _transformUnk/_transform would be NULL and the calls below
  // would crash -- confirm callers only construct this where the
  // resampler DSP is available.
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    , _resamplerProps( NULL )
    #endif
  {
    // 1. Initialization

    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object

    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
    _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
    #endif

    // 3. Specify input / output format

    // Base media type shared by both directions.
    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    // Input type: an exact copy of the base type.
    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    // Output type: the base type with only the rate (and byte rate) changed.
    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
  }

  ~WasapiResampler()
  {
    // 8. Send stream stop messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // 9. Cleanup

    MFShutdown();

    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    SAFE_RELEASE( _resamplerProps );
    #endif
  }

  // Converts inSampleCount frames from inBuffer (input rate) to outBuffer
  // (output rate); the number of frames produced is returned through
  // outSampleCount.  When maxOutSampleCount >= 0 it caps the output
  // buffer size requested from the transform.  With a 1:1 ratio the data
  // is copied through unchanged.
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
  {
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    if ( _sampleRatio == 1 )
    {
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;
      return;
    }

    unsigned int outputBufferSize = 0;
    if ( maxOutSampleCount != -1 )
    {
      outputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
    }
    else
    {
      // Worst-case estimate, plus one frame of slack for rounding.
      outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
    }

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data

    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler

    _transform->ProcessInput( 0, rInSample, 0 );

    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion

    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
    DWORD rStatus;
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data

    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler

    // The transform may legitimately need more input before it can emit
    // anything; report zero output frames in that case.
    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
    {
      outSampleCount = 0;
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );
      return;
    }

    // 7.3 Write output data to outBuffer

    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes );

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  }

private:
  unsigned int _bytesPerSample;   // bytes per single sample (bitsPerSample / 8)
  unsigned int _channelCount;     // interleaved channels per frame
  float _sampleRatio;             // outSampleRate / inSampleRate

  IUnknown* _transformUnk;        // IUnknown of the resampler DSP object
  IMFTransform* _transform;       // the resampler Media Foundation transform
  IMFMediaType* _mediaType;       // base media type shared by both directions
  IMFMediaType* _inputMediaType;  // base type at inSampleRate
  IMFMediaType* _outputMediaType; // base type at outSampleRate

  #ifdef __IWMResamplerProps_FWD_DEFINED__
  IWMResamplerProps* _resamplerProps; // quality tuning (half filter length)
  #endif
};
4189
4190 //-----------------------------------------------------------------------------
4191
4192 // A structure to hold various information related to the WASAPI implementation.
4193 struct WasapiHandle
4194 {
4195 IAudioClient* captureAudioClient;
4196 IAudioClient* renderAudioClient;
4197 IAudioCaptureClient* captureClient;
4198 IAudioRenderClient* renderClient;
4199 HANDLE captureEvent;
4200 HANDLE renderEvent;
4201
WasapiHandleWasapiHandle4202 WasapiHandle()
4203 : captureAudioClient( NULL ),
4204 renderAudioClient( NULL ),
4205 captureClient( NULL ),
4206 renderClient( NULL ),
4207 captureEvent( NULL ),
4208 renderEvent( NULL ) {}
4209 };
4210
4211 //=============================================================================
4212
RtApiWasapi()4213 RtApiWasapi::RtApiWasapi()
4214 : coInitialized_( false ), deviceEnumerator_( NULL )
4215 {
4216 // WASAPI can run either apartment or multi-threaded
4217 HRESULT hr = CoInitialize( NULL );
4218 if ( !FAILED( hr ) )
4219 coInitialized_ = true;
4220
4221 // Instantiate device enumerator
4222 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4223 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4224 ( void** ) &deviceEnumerator_ );
4225
4226 // If this runs on an old Windows, it will fail. Ignore and proceed.
4227 if ( FAILED( hr ) )
4228 deviceEnumerator_ = NULL;
4229 }
4230
4231 //-----------------------------------------------------------------------------
4232
~RtApiWasapi()4233 RtApiWasapi::~RtApiWasapi()
4234 {
4235 if ( stream_.state != STREAM_CLOSED )
4236 closeStream();
4237
4238 SAFE_RELEASE( deviceEnumerator_ );
4239
4240 // If this object previously called CoInitialize()
4241 if ( coInitialized_ )
4242 CoUninitialize();
4243 }
4244
4245 //=============================================================================
4246
getDeviceCount(void)4247 unsigned int RtApiWasapi::getDeviceCount( void )
4248 {
4249 unsigned int captureDeviceCount = 0;
4250 unsigned int renderDeviceCount = 0;
4251
4252 IMMDeviceCollection* captureDevices = NULL;
4253 IMMDeviceCollection* renderDevices = NULL;
4254
4255 if ( !deviceEnumerator_ )
4256 return 0;
4257
4258 // Count capture devices
4259 errorText_.clear();
4260 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4263 goto Exit;
4264 }
4265
4266 hr = captureDevices->GetCount( &captureDeviceCount );
4267 if ( FAILED( hr ) ) {
4268 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4269 goto Exit;
4270 }
4271
4272 // Count render devices
4273 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4274 if ( FAILED( hr ) ) {
4275 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4276 goto Exit;
4277 }
4278
4279 hr = renderDevices->GetCount( &renderDeviceCount );
4280 if ( FAILED( hr ) ) {
4281 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4282 goto Exit;
4283 }
4284
4285 Exit:
4286 // release all references
4287 SAFE_RELEASE( captureDevices );
4288 SAFE_RELEASE( renderDevices );
4289
4290 if ( errorText_.empty() )
4291 return captureDeviceCount + renderDeviceCount;
4292
4293 error( RtAudioError::DRIVER_ERROR );
4294 return 0;
4295 }
4296
4297 //-----------------------------------------------------------------------------
4298
getDeviceInfo(unsigned int device)4299 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4300 {
4301 RtAudio::DeviceInfo info;
4302 unsigned int captureDeviceCount = 0;
4303 unsigned int renderDeviceCount = 0;
4304 std::string defaultDeviceName;
4305 bool isCaptureDevice = false;
4306
4307 PROPVARIANT deviceNameProp;
4308 PROPVARIANT defaultDeviceNameProp;
4309
4310 IMMDeviceCollection* captureDevices = NULL;
4311 IMMDeviceCollection* renderDevices = NULL;
4312 IMMDevice* devicePtr = NULL;
4313 IMMDevice* defaultDevicePtr = NULL;
4314 IAudioClient* audioClient = NULL;
4315 IPropertyStore* devicePropStore = NULL;
4316 IPropertyStore* defaultDevicePropStore = NULL;
4317
4318 WAVEFORMATEX* deviceFormat = NULL;
4319 WAVEFORMATEX* closestMatchFormat = NULL;
4320
4321 // probed
4322 info.probed = false;
4323
4324 // Count capture devices
4325 errorText_.clear();
4326 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4327 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4330 goto Exit;
4331 }
4332
4333 hr = captureDevices->GetCount( &captureDeviceCount );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4336 goto Exit;
4337 }
4338
4339 // Count render devices
4340 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4341 if ( FAILED( hr ) ) {
4342 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4343 goto Exit;
4344 }
4345
4346 hr = renderDevices->GetCount( &renderDeviceCount );
4347 if ( FAILED( hr ) ) {
4348 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4349 goto Exit;
4350 }
4351
4352 // validate device index
4353 if ( device >= captureDeviceCount + renderDeviceCount ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4355 errorType = RtAudioError::INVALID_USE;
4356 goto Exit;
4357 }
4358
4359 // determine whether index falls within capture or render devices
4360 if ( device >= renderDeviceCount ) {
4361 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4362 if ( FAILED( hr ) ) {
4363 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4364 goto Exit;
4365 }
4366 isCaptureDevice = true;
4367 }
4368 else {
4369 hr = renderDevices->Item( device, &devicePtr );
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4372 goto Exit;
4373 }
4374 isCaptureDevice = false;
4375 }
4376
4377 // get default device name
4378 if ( isCaptureDevice ) {
4379 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4382 goto Exit;
4383 }
4384 }
4385 else {
4386 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4389 goto Exit;
4390 }
4391 }
4392
4393 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4394 if ( FAILED( hr ) ) {
4395 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4396 goto Exit;
4397 }
4398 PropVariantInit( &defaultDeviceNameProp );
4399
4400 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4403 goto Exit;
4404 }
4405
4406 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4407
4408 // name
4409 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4412 goto Exit;
4413 }
4414
4415 PropVariantInit( &deviceNameProp );
4416
4417 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4418 if ( FAILED( hr ) ) {
4419 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4420 goto Exit;
4421 }
4422
4423 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4424
4425 // is default
4426 if ( isCaptureDevice ) {
4427 info.isDefaultInput = info.name == defaultDeviceName;
4428 info.isDefaultOutput = false;
4429 }
4430 else {
4431 info.isDefaultInput = false;
4432 info.isDefaultOutput = info.name == defaultDeviceName;
4433 }
4434
4435 // channel count
4436 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4439 goto Exit;
4440 }
4441
4442 hr = audioClient->GetMixFormat( &deviceFormat );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4445 goto Exit;
4446 }
4447
4448 if ( isCaptureDevice ) {
4449 info.inputChannels = deviceFormat->nChannels;
4450 info.outputChannels = 0;
4451 info.duplexChannels = 0;
4452 }
4453 else {
4454 info.inputChannels = 0;
4455 info.outputChannels = deviceFormat->nChannels;
4456 info.duplexChannels = 0;
4457 }
4458
4459 // sample rates
4460 info.sampleRates.clear();
4461
4462 // allow support for all sample rates as we have a built-in sample rate converter
4463 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4464 info.sampleRates.push_back( SAMPLE_RATES[i] );
4465 }
4466 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4467
4468 // native format
4469 info.nativeFormats = 0;
4470
4471 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4472 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4473 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4474 {
4475 if ( deviceFormat->wBitsPerSample == 32 ) {
4476 info.nativeFormats |= RTAUDIO_FLOAT32;
4477 }
4478 else if ( deviceFormat->wBitsPerSample == 64 ) {
4479 info.nativeFormats |= RTAUDIO_FLOAT64;
4480 }
4481 }
4482 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4483 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4484 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4485 {
4486 if ( deviceFormat->wBitsPerSample == 8 ) {
4487 info.nativeFormats |= RTAUDIO_SINT8;
4488 }
4489 else if ( deviceFormat->wBitsPerSample == 16 ) {
4490 info.nativeFormats |= RTAUDIO_SINT16;
4491 }
4492 else if ( deviceFormat->wBitsPerSample == 24 ) {
4493 info.nativeFormats |= RTAUDIO_SINT24;
4494 }
4495 else if ( deviceFormat->wBitsPerSample == 32 ) {
4496 info.nativeFormats |= RTAUDIO_SINT32;
4497 }
4498 }
4499
4500 // probed
4501 info.probed = true;
4502
4503 Exit:
4504 // release all references
4505 PropVariantClear( &deviceNameProp );
4506 PropVariantClear( &defaultDeviceNameProp );
4507
4508 SAFE_RELEASE( captureDevices );
4509 SAFE_RELEASE( renderDevices );
4510 SAFE_RELEASE( devicePtr );
4511 SAFE_RELEASE( defaultDevicePtr );
4512 SAFE_RELEASE( audioClient );
4513 SAFE_RELEASE( devicePropStore );
4514 SAFE_RELEASE( defaultDevicePropStore );
4515
4516 CoTaskMemFree( deviceFormat );
4517 CoTaskMemFree( closestMatchFormat );
4518
4519 if ( !errorText_.empty() )
4520 error( errorType );
4521 return info;
4522 }
4523
4524 //-----------------------------------------------------------------------------
4525
getDefaultOutputDevice(void)4526 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4527 {
4528 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4529 if ( getDeviceInfo( i ).isDefaultOutput ) {
4530 return i;
4531 }
4532 }
4533
4534 return 0;
4535 }
4536
4537 //-----------------------------------------------------------------------------
4538
getDefaultInputDevice(void)4539 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4540 {
4541 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4542 if ( getDeviceInfo( i ).isDefaultInput ) {
4543 return i;
4544 }
4545 }
4546
4547 return 0;
4548 }
4549
4550 //-----------------------------------------------------------------------------
4551
closeStream(void)4552 void RtApiWasapi::closeStream( void )
4553 {
4554 if ( stream_.state == STREAM_CLOSED ) {
4555 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4556 error( RtAudioError::WARNING );
4557 return;
4558 }
4559
4560 if ( stream_.state != STREAM_STOPPED )
4561 stopStream();
4562
4563 // clean up stream memory
4564 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4565 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4566
4567 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4568 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4569
4570 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4571 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4572
4573 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4574 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4575
4576 delete ( WasapiHandle* ) stream_.apiHandle;
4577 stream_.apiHandle = NULL;
4578
4579 for ( int i = 0; i < 2; i++ ) {
4580 if ( stream_.userBuffer[i] ) {
4581 free( stream_.userBuffer[i] );
4582 stream_.userBuffer[i] = 0;
4583 }
4584 }
4585
4586 if ( stream_.deviceBuffer ) {
4587 free( stream_.deviceBuffer );
4588 stream_.deviceBuffer = 0;
4589 }
4590
4591 // update stream state
4592 stream_.state = STREAM_CLOSED;
4593 }
4594
4595 //-----------------------------------------------------------------------------
4596
startStream(void)4597 void RtApiWasapi::startStream( void )
4598 {
4599 verifyStream();
4600
4601 if ( stream_.state == STREAM_RUNNING ) {
4602 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4603 error( RtAudioError::WARNING );
4604 return;
4605 }
4606
4607 #if defined( HAVE_GETTIMEOFDAY )
4608 gettimeofday( &stream_.lastTickTimestamp, NULL );
4609 #endif
4610
4611 // update stream state
4612 stream_.state = STREAM_RUNNING;
4613
4614 // create WASAPI stream thread
4615 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4616
4617 if ( !stream_.callbackInfo.thread ) {
4618 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4619 error( RtAudioError::THREAD_ERROR );
4620 }
4621 else {
4622 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4623 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4624 }
4625 }
4626
4627 //-----------------------------------------------------------------------------
4628
stopStream(void)4629 void RtApiWasapi::stopStream( void )
4630 {
4631 verifyStream();
4632
4633 if ( stream_.state == STREAM_STOPPED ) {
4634 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4635 error( RtAudioError::WARNING );
4636 return;
4637 }
4638
4639 // inform stream thread by setting stream state to STREAM_STOPPING
4640 stream_.state = STREAM_STOPPING;
4641
4642 // wait until stream thread is stopped
4643 while( stream_.state != STREAM_STOPPED ) {
4644 Sleep( 1 );
4645 }
4646
4647 // Wait for the last buffer to play before stopping.
4648 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4649
4650 // close thread handle
4651 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4652 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4653 error( RtAudioError::THREAD_ERROR );
4654 return;
4655 }
4656
4657 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4658 }
4659
4660 //-----------------------------------------------------------------------------
4661
abortStream(void)4662 void RtApiWasapi::abortStream( void )
4663 {
4664 verifyStream();
4665
4666 if ( stream_.state == STREAM_STOPPED ) {
4667 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4668 error( RtAudioError::WARNING );
4669 return;
4670 }
4671
4672 // inform stream thread by setting stream state to STREAM_STOPPING
4673 stream_.state = STREAM_STOPPING;
4674
4675 // wait until stream thread is stopped
4676 while ( stream_.state != STREAM_STOPPED ) {
4677 Sleep( 1 );
4678 }
4679
4680 // close thread handle
4681 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4682 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4683 error( RtAudioError::THREAD_ERROR );
4684 return;
4685 }
4686
4687 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4688 }
4689
4690 //-----------------------------------------------------------------------------
4691
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4692 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4693 unsigned int firstChannel, unsigned int sampleRate,
4694 RtAudioFormat format, unsigned int* bufferSize,
4695 RtAudio::StreamOptions* options )
4696 {
4697 bool methodResult = FAILURE;
4698 unsigned int captureDeviceCount = 0;
4699 unsigned int renderDeviceCount = 0;
4700
4701 IMMDeviceCollection* captureDevices = NULL;
4702 IMMDeviceCollection* renderDevices = NULL;
4703 IMMDevice* devicePtr = NULL;
4704 WAVEFORMATEX* deviceFormat = NULL;
4705 unsigned int bufferBytes;
4706 stream_.state = STREAM_STOPPED;
4707
4708 // create API Handle if not already created
4709 if ( !stream_.apiHandle )
4710 stream_.apiHandle = ( void* ) new WasapiHandle();
4711
4712 // Count capture devices
4713 errorText_.clear();
4714 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4715 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4716 if ( FAILED( hr ) ) {
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4718 goto Exit;
4719 }
4720
4721 hr = captureDevices->GetCount( &captureDeviceCount );
4722 if ( FAILED( hr ) ) {
4723 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4724 goto Exit;
4725 }
4726
4727 // Count render devices
4728 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4729 if ( FAILED( hr ) ) {
4730 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4731 goto Exit;
4732 }
4733
4734 hr = renderDevices->GetCount( &renderDeviceCount );
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4737 goto Exit;
4738 }
4739
4740 // validate device index
4741 if ( device >= captureDeviceCount + renderDeviceCount ) {
4742 errorType = RtAudioError::INVALID_USE;
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4744 goto Exit;
4745 }
4746
4747 // if device index falls within capture devices
4748 if ( device >= renderDeviceCount ) {
4749 if ( mode != INPUT ) {
4750 errorType = RtAudioError::INVALID_USE;
4751 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4752 goto Exit;
4753 }
4754
4755 // retrieve captureAudioClient from devicePtr
4756 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4757
4758 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4759 if ( FAILED( hr ) ) {
4760 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4761 goto Exit;
4762 }
4763
4764 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4765 NULL, ( void** ) &captureAudioClient );
4766 if ( FAILED( hr ) ) {
4767 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4768 goto Exit;
4769 }
4770
4771 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4772 if ( FAILED( hr ) ) {
4773 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4774 goto Exit;
4775 }
4776
4777 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4778 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4779 }
4780
4781 // if device index falls within render devices and is configured for loopback
4782 if ( device < renderDeviceCount && mode == INPUT )
4783 {
4784 // if renderAudioClient is not initialised, initialise it now
4785 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786 if ( !renderAudioClient )
4787 {
4788 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4789 }
4790
4791 // retrieve captureAudioClient from devicePtr
4792 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4793
4794 hr = renderDevices->Item( device, &devicePtr );
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4797 goto Exit;
4798 }
4799
4800 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4801 NULL, ( void** ) &captureAudioClient );
4802 if ( FAILED( hr ) ) {
4803 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4804 goto Exit;
4805 }
4806
4807 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4808 if ( FAILED( hr ) ) {
4809 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4810 goto Exit;
4811 }
4812
4813 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4814 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4815 }
4816
4817 // if device index falls within render devices and is configured for output
4818 if ( device < renderDeviceCount && mode == OUTPUT )
4819 {
4820 // if renderAudioClient is already initialised, don't initialise it again
4821 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4822 if ( renderAudioClient )
4823 {
4824 methodResult = SUCCESS;
4825 goto Exit;
4826 }
4827
4828 hr = renderDevices->Item( device, &devicePtr );
4829 if ( FAILED( hr ) ) {
4830 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4831 goto Exit;
4832 }
4833
4834 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4835 NULL, ( void** ) &renderAudioClient );
4836 if ( FAILED( hr ) ) {
4837 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4838 goto Exit;
4839 }
4840
4841 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4842 if ( FAILED( hr ) ) {
4843 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4844 goto Exit;
4845 }
4846
4847 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4848 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4849 }
4850
4851 // fill stream data
4852 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4853 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4854 stream_.mode = DUPLEX;
4855 }
4856 else {
4857 stream_.mode = mode;
4858 }
4859
4860 stream_.device[mode] = device;
4861 stream_.doByteSwap[mode] = false;
4862 stream_.sampleRate = sampleRate;
4863 stream_.bufferSize = *bufferSize;
4864 stream_.nBuffers = 1;
4865 stream_.nUserChannels[mode] = channels;
4866 stream_.channelOffset[mode] = firstChannel;
4867 stream_.userFormat = format;
4868 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4869
4870 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4871 stream_.userInterleaved = false;
4872 else
4873 stream_.userInterleaved = true;
4874 stream_.deviceInterleaved[mode] = true;
4875
4876 // Set flags for buffer conversion.
4877 stream_.doConvertBuffer[mode] = false;
4878 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4879 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4880 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4881 stream_.doConvertBuffer[mode] = true;
4882 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4883 stream_.nUserChannels[mode] > 1 )
4884 stream_.doConvertBuffer[mode] = true;
4885
4886 if ( stream_.doConvertBuffer[mode] )
4887 setConvertInfo( mode, firstChannel );
4888
4889 // Allocate necessary internal buffers
4890 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4891
4892 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4893 if ( !stream_.userBuffer[mode] ) {
4894 errorType = RtAudioError::MEMORY_ERROR;
4895 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4896 goto Exit;
4897 }
4898
4899 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4900 stream_.callbackInfo.priority = 15;
4901 else
4902 stream_.callbackInfo.priority = 0;
4903
4904 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4905 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4906
4907 methodResult = SUCCESS;
4908
4909 Exit:
4910 //clean up
4911 SAFE_RELEASE( captureDevices );
4912 SAFE_RELEASE( renderDevices );
4913 SAFE_RELEASE( devicePtr );
4914 CoTaskMemFree( deviceFormat );
4915
4916 // if method failed, close the stream
4917 if ( methodResult == FAILURE )
4918 closeStream();
4919
4920 if ( !errorText_.empty() )
4921 error( errorType );
4922 return methodResult;
4923 }
4924
4925 //=============================================================================
4926
runWasapiThread(void * wasapiPtr)4927 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4928 {
4929 if ( wasapiPtr )
4930 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4931
4932 return 0;
4933 }
4934
stopWasapiThread(void * wasapiPtr)4935 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4936 {
4937 if ( wasapiPtr )
4938 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4939
4940 return 0;
4941 }
4942
abortWasapiThread(void * wasapiPtr)4943 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4944 {
4945 if ( wasapiPtr )
4946 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4947
4948 return 0;
4949 }
4950
4951 //-----------------------------------------------------------------------------
4952
wasapiThread()4953 void RtApiWasapi::wasapiThread()
4954 {
4955 // as this is a new thread, we must CoInitialize it
4956 CoInitialize( NULL );
4957
4958 HRESULT hr;
4959
4960 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4961 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4962 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4963 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4964 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4965 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4966
4967 WAVEFORMATEX* captureFormat = NULL;
4968 WAVEFORMATEX* renderFormat = NULL;
4969 float captureSrRatio = 0.0f;
4970 float renderSrRatio = 0.0f;
4971 WasapiBuffer captureBuffer;
4972 WasapiBuffer renderBuffer;
4973 WasapiResampler* captureResampler = NULL;
4974 WasapiResampler* renderResampler = NULL;
4975
4976 // declare local stream variables
4977 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4978 BYTE* streamBuffer = NULL;
4979 DWORD captureFlags = 0;
4980 unsigned int bufferFrameCount = 0;
4981 unsigned int numFramesPadding = 0;
4982 unsigned int convBufferSize = 0;
4983 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4984 bool callbackPushed = true;
4985 bool callbackPulled = false;
4986 bool callbackStopped = false;
4987 int callbackResult = 0;
4988
4989 // convBuffer is used to store converted buffers between WASAPI and the user
4990 char* convBuffer = NULL;
4991 unsigned int convBuffSize = 0;
4992 unsigned int deviceBuffSize = 0;
4993
4994 std::string errorText;
4995 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4996
4997 // Attempt to assign "Pro Audio" characteristic to thread
4998 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4999 if ( AvrtDll ) {
5000 DWORD taskIndex = 0;
5001 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5002 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5003 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5004 FreeLibrary( AvrtDll );
5005 }
5006
5007 // start capture stream if applicable
5008 if ( captureAudioClient ) {
5009 hr = captureAudioClient->GetMixFormat( &captureFormat );
5010 if ( FAILED( hr ) ) {
5011 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5012 goto Exit;
5013 }
5014
5015 // init captureResampler
5016 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5017 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5018 captureFormat->nSamplesPerSec, stream_.sampleRate );
5019
5020 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5021
5022 if ( !captureClient ) {
5023 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5024 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5025 0,
5026 0,
5027 captureFormat,
5028 NULL );
5029 if ( FAILED( hr ) ) {
5030 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5031 goto Exit;
5032 }
5033
5034 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5035 ( void** ) &captureClient );
5036 if ( FAILED( hr ) ) {
5037 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5038 goto Exit;
5039 }
5040
5041 // don't configure captureEvent if in loopback mode
5042 if ( !loopbackEnabled )
5043 {
5044 // configure captureEvent to trigger on every available capture buffer
5045 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5046 if ( !captureEvent ) {
5047 errorType = RtAudioError::SYSTEM_ERROR;
5048 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5049 goto Exit;
5050 }
5051
5052 hr = captureAudioClient->SetEventHandle( captureEvent );
5053 if ( FAILED( hr ) ) {
5054 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5055 goto Exit;
5056 }
5057
5058 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5059 }
5060
5061 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5062
5063 // reset the capture stream
5064 hr = captureAudioClient->Reset();
5065 if ( FAILED( hr ) ) {
5066 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5067 goto Exit;
5068 }
5069
5070 // start the capture stream
5071 hr = captureAudioClient->Start();
5072 if ( FAILED( hr ) ) {
5073 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5074 goto Exit;
5075 }
5076 }
5077
5078 unsigned int inBufferSize = 0;
5079 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5080 if ( FAILED( hr ) ) {
5081 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5082 goto Exit;
5083 }
5084
5085 // scale outBufferSize according to stream->user sample rate ratio
5086 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5087 inBufferSize *= stream_.nDeviceChannels[INPUT];
5088
5089 // set captureBuffer size
5090 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5091 }
5092
5093 // start render stream if applicable
5094 if ( renderAudioClient ) {
5095 hr = renderAudioClient->GetMixFormat( &renderFormat );
5096 if ( FAILED( hr ) ) {
5097 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5098 goto Exit;
5099 }
5100
5101 // init renderResampler
5102 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5103 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5104 stream_.sampleRate, renderFormat->nSamplesPerSec );
5105
5106 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5107
5108 if ( !renderClient ) {
5109 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5110 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5111 0,
5112 0,
5113 renderFormat,
5114 NULL );
5115 if ( FAILED( hr ) ) {
5116 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5117 goto Exit;
5118 }
5119
5120 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5121 ( void** ) &renderClient );
5122 if ( FAILED( hr ) ) {
5123 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5124 goto Exit;
5125 }
5126
5127 // configure renderEvent to trigger on every available render buffer
5128 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5129 if ( !renderEvent ) {
5130 errorType = RtAudioError::SYSTEM_ERROR;
5131 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5132 goto Exit;
5133 }
5134
5135 hr = renderAudioClient->SetEventHandle( renderEvent );
5136 if ( FAILED( hr ) ) {
5137 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5138 goto Exit;
5139 }
5140
5141 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5142 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5143
5144 // reset the render stream
5145 hr = renderAudioClient->Reset();
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5148 goto Exit;
5149 }
5150
5151 // start the render stream
5152 hr = renderAudioClient->Start();
5153 if ( FAILED( hr ) ) {
5154 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5155 goto Exit;
5156 }
5157 }
5158
5159 unsigned int outBufferSize = 0;
5160 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5161 if ( FAILED( hr ) ) {
5162 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5163 goto Exit;
5164 }
5165
5166 // scale inBufferSize according to user->stream sample rate ratio
5167 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5168 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5169
5170 // set renderBuffer size
5171 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5172 }
5173
5174 // malloc buffer memory
5175 if ( stream_.mode == INPUT )
5176 {
5177 using namespace std; // for ceilf
5178 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5179 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5180 }
5181 else if ( stream_.mode == OUTPUT )
5182 {
5183 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5184 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5185 }
5186 else if ( stream_.mode == DUPLEX )
5187 {
5188 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5189 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5190 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5191 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5192 }
5193
5194 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5195 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5196 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5197 if ( !convBuffer || !stream_.deviceBuffer ) {
5198 errorType = RtAudioError::MEMORY_ERROR;
5199 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5200 goto Exit;
5201 }
5202
5203 // stream process loop
5204 while ( stream_.state != STREAM_STOPPING ) {
5205 if ( !callbackPulled ) {
5206 // Callback Input
5207 // ==============
5208 // 1. Pull callback buffer from inputBuffer
5209 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5210 // Convert callback buffer to user format
5211
5212 if ( captureAudioClient )
5213 {
5214 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5215
5216 convBufferSize = 0;
5217 while ( convBufferSize < stream_.bufferSize )
5218 {
5219 // Pull callback buffer from inputBuffer
5220 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5221 samplesToPull * stream_.nDeviceChannels[INPUT],
5222 stream_.deviceFormat[INPUT] );
5223
5224 if ( !callbackPulled )
5225 {
5226 break;
5227 }
5228
5229 // Convert callback buffer to user sample rate
5230 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5231 unsigned int convSamples = 0;
5232
5233 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5234 convBuffer,
5235 samplesToPull,
5236 convSamples,
5237 convBufferSize == 0 ? -1 : stream_.bufferSize - convBufferSize );
5238
5239 convBufferSize += convSamples;
5240 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5241 }
5242
5243 if ( callbackPulled )
5244 {
5245 if ( stream_.doConvertBuffer[INPUT] ) {
5246 // Convert callback buffer to user format
5247 convertBuffer( stream_.userBuffer[INPUT],
5248 stream_.deviceBuffer,
5249 stream_.convertInfo[INPUT] );
5250 }
5251 else {
5252 // no further conversion, simple copy deviceBuffer to userBuffer
5253 memcpy( stream_.userBuffer[INPUT],
5254 stream_.deviceBuffer,
5255 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5256 }
5257 }
5258 }
5259 else {
5260 // if there is no capture stream, set callbackPulled flag
5261 callbackPulled = true;
5262 }
5263
5264 // Execute Callback
5265 // ================
5266 // 1. Execute user callback method
5267 // 2. Handle return value from callback
5268
5269 // if callback has not requested the stream to stop
5270 if ( callbackPulled && !callbackStopped ) {
5271 // Execute user callback method
5272 callbackResult = callback( stream_.userBuffer[OUTPUT],
5273 stream_.userBuffer[INPUT],
5274 stream_.bufferSize,
5275 getStreamTime(),
5276 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5277 stream_.callbackInfo.userData );
5278
5279 // tick stream time
5280 RtApi::tickStreamTime();
5281
5282 // Handle return value from callback
5283 if ( callbackResult == 1 ) {
5284 // instantiate a thread to stop this thread
5285 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5286 if ( !threadHandle ) {
5287 errorType = RtAudioError::THREAD_ERROR;
5288 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5289 goto Exit;
5290 }
5291 else if ( !CloseHandle( threadHandle ) ) {
5292 errorType = RtAudioError::THREAD_ERROR;
5293 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5294 goto Exit;
5295 }
5296
5297 callbackStopped = true;
5298 }
5299 else if ( callbackResult == 2 ) {
5300 // instantiate a thread to stop this thread
5301 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5302 if ( !threadHandle ) {
5303 errorType = RtAudioError::THREAD_ERROR;
5304 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5305 goto Exit;
5306 }
5307 else if ( !CloseHandle( threadHandle ) ) {
5308 errorType = RtAudioError::THREAD_ERROR;
5309 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5310 goto Exit;
5311 }
5312
5313 callbackStopped = true;
5314 }
5315 }
5316 }
5317
5318 // Callback Output
5319 // ===============
5320 // 1. Convert callback buffer to stream format
5321 // 2. Convert callback buffer to stream sample rate and channel count
5322 // 3. Push callback buffer into outputBuffer
5323
5324 if ( renderAudioClient && callbackPulled )
5325 {
5326 // if the last call to renderBuffer.PushBuffer() was successful
5327 if ( callbackPushed || convBufferSize == 0 )
5328 {
5329 if ( stream_.doConvertBuffer[OUTPUT] )
5330 {
5331 // Convert callback buffer to stream format
5332 convertBuffer( stream_.deviceBuffer,
5333 stream_.userBuffer[OUTPUT],
5334 stream_.convertInfo[OUTPUT] );
5335
5336 }
5337 else {
5338 // no further conversion, simple copy userBuffer to deviceBuffer
5339 memcpy( stream_.deviceBuffer,
5340 stream_.userBuffer[OUTPUT],
5341 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5342 }
5343
5344 // Convert callback buffer to stream sample rate
5345 renderResampler->Convert( convBuffer,
5346 stream_.deviceBuffer,
5347 stream_.bufferSize,
5348 convBufferSize );
5349 }
5350
5351 // Push callback buffer into outputBuffer
5352 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5353 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5354 stream_.deviceFormat[OUTPUT] );
5355 }
5356 else {
5357 // if there is no render stream, set callbackPushed flag
5358 callbackPushed = true;
5359 }
5360
5361 // Stream Capture
5362 // ==============
5363 // 1. Get capture buffer from stream
5364 // 2. Push capture buffer into inputBuffer
5365 // 3. If 2. was successful: Release capture buffer
5366
5367 if ( captureAudioClient ) {
5368 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5369 if ( !callbackPulled ) {
5370 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5371 }
5372
5373 // Get capture buffer from stream
5374 hr = captureClient->GetBuffer( &streamBuffer,
5375 &bufferFrameCount,
5376 &captureFlags, NULL, NULL );
5377 if ( FAILED( hr ) ) {
5378 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5379 goto Exit;
5380 }
5381
5382 if ( bufferFrameCount != 0 ) {
5383 // Push capture buffer into inputBuffer
5384 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5385 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5386 stream_.deviceFormat[INPUT] ) )
5387 {
5388 // Release capture buffer
5389 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5390 if ( FAILED( hr ) ) {
5391 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5392 goto Exit;
5393 }
5394 }
5395 else
5396 {
5397 // Inform WASAPI that capture was unsuccessful
5398 hr = captureClient->ReleaseBuffer( 0 );
5399 if ( FAILED( hr ) ) {
5400 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5401 goto Exit;
5402 }
5403 }
5404 }
5405 else
5406 {
5407 // Inform WASAPI that capture was unsuccessful
5408 hr = captureClient->ReleaseBuffer( 0 );
5409 if ( FAILED( hr ) ) {
5410 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5411 goto Exit;
5412 }
5413 }
5414 }
5415
5416 // Stream Render
5417 // =============
5418 // 1. Get render buffer from stream
5419 // 2. Pull next buffer from outputBuffer
5420 // 3. If 2. was successful: Fill render buffer with next buffer
5421 // Release render buffer
5422
5423 if ( renderAudioClient ) {
5424 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5425 if ( callbackPulled && !callbackPushed ) {
5426 WaitForSingleObject( renderEvent, INFINITE );
5427 }
5428
5429 // Get render buffer from stream
5430 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5431 if ( FAILED( hr ) ) {
5432 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5433 goto Exit;
5434 }
5435
5436 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5437 if ( FAILED( hr ) ) {
5438 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5439 goto Exit;
5440 }
5441
5442 bufferFrameCount -= numFramesPadding;
5443
5444 if ( bufferFrameCount != 0 ) {
5445 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5448 goto Exit;
5449 }
5450
5451 // Pull next buffer from outputBuffer
5452 // Fill render buffer with next buffer
5453 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5454 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5455 stream_.deviceFormat[OUTPUT] ) )
5456 {
5457 // Release render buffer
5458 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5459 if ( FAILED( hr ) ) {
5460 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5461 goto Exit;
5462 }
5463 }
5464 else
5465 {
5466 // Inform WASAPI that render was unsuccessful
5467 hr = renderClient->ReleaseBuffer( 0, 0 );
5468 if ( FAILED( hr ) ) {
5469 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5470 goto Exit;
5471 }
5472 }
5473 }
5474 else
5475 {
5476 // Inform WASAPI that render was unsuccessful
5477 hr = renderClient->ReleaseBuffer( 0, 0 );
5478 if ( FAILED( hr ) ) {
5479 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5480 goto Exit;
5481 }
5482 }
5483 }
5484
5485 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5486 if ( callbackPushed ) {
5487 // unsetting the callbackPulled flag lets the stream know that
5488 // the audio device is ready for another callback output buffer.
5489 callbackPulled = false;
5490 }
5491
5492 }
5493
5494 Exit:
5495 // clean up
5496 CoTaskMemFree( captureFormat );
5497 CoTaskMemFree( renderFormat );
5498
5499 free ( convBuffer );
5500 delete renderResampler;
5501 delete captureResampler;
5502
5503 CoUninitialize();
5504
5505 // update stream state
5506 stream_.state = STREAM_STOPPED;
5507
5508 if ( !errorText.empty() )
5509 {
5510 errorText_ = errorText;
5511 error( errorType );
5512 }
5513 }
5514
5515 //******************** End of __WINDOWS_WASAPI__ *********************//
5516 #endif
5517
5518
5519 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5520
5521 // Modified by Robin Davies, October 2005
5522 // - Improvements to DirectX pointer chasing.
5523 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5524 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5525 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5526 // Changed device query structure for RtAudio 4.0.7, January 2010
5527
5528 #include <windows.h>
5529 #include <process.h>
5530 #include <mmsystem.h>
5531 #include <mmreg.h>
5532 #include <dsound.h>
5533 #include <assert.h>
5534 #include <algorithm>
5535
5536 #if defined(__MINGW32__)
5537 // missing from latest mingw winapi
5538 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5539 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5540 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5541 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5542 #endif
5543
5544 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5545
5546 #ifdef _MSC_VER // if Microsoft Visual C++
5547 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5548 #endif
5549
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5550 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5551 {
5552 if ( pointer > bufferSize ) pointer -= bufferSize;
5553 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5554 if ( pointer < earlierPointer ) pointer += bufferSize;
5555 return pointer >= earlierPointer && pointer < laterPointer;
5556 }
5557
// A structure to hold various information related to the DirectSound
// API implementation.
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  void *id[2];               // DirectSound buffer interface pointers (presumably [0] = playback, [1] = capture — matches the id[2] convention in DsDevice)
  void *buffer[2];           // per-direction device buffer pointers
  bool xrun[2];              // per-direction over/underrun indicators
  UINT bufferPointer[2];     // current byte offset within each DirectSound buffer
  DWORD dsBufferSize[2];     // total size (bytes) of each DirectSound buffer
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition;          // NOTE(review): event/condition handle; created outside this struct — not initialized by the constructor below

  // Zero/false-initialize all bookkeeping fields ('condition' excluded).
  DsHandle()
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
};
5574
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.

// Enumeration callback handed to DirectSound(Capture)Enumerate; receives a
// DsProbeData* through lpContext (see getDeviceCount below).
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR module,
                                          LPVOID lpContext );

// Maps a DirectSound HRESULT/error code to a human-readable string for
// error reporting.
static const char* getErrorString( int code );

// Audio processing thread entry point (presumably passed to _beginthreadex,
// hence the __stdcall/unsigned signature).
static unsigned __stdcall callbackHandler( void *ptr );
5585
// Bookkeeping for one enumerated DirectSound device.
struct DsDevice {
  LPGUID id[2];     // device GUIDs: [0] = output (render), [1] = input (capture)
  bool validId[2];  // whether the corresponding GUID slot has been filled in
  bool found;       // set during enumeration; cleared beforehand so vanished devices can be pruned
  std::string name;

  // Start with both GUID slots invalid and the device unseen.
  DsDevice()
    : found(false) { validId[0] = false; validId[1] = false; }
};
5595
// Context passed (via LPVOID) to deviceQueryCallback during enumeration.
struct DsProbeData {
  bool isInput;                          // true when enumerating capture devices, false for output
  std::vector<struct DsDevice>* dsDevices; // device list to populate/update
};
5600
RtApiDs()5601 RtApiDs :: RtApiDs()
5602 {
5603 // Dsound will run both-threaded. If CoInitialize fails, then just
5604 // accept whatever the mainline chose for a threading model.
5605 coInitialized_ = false;
5606 HRESULT hr = CoInitialize( NULL );
5607 if ( !FAILED( hr ) ) coInitialized_ = true;
5608 }
5609
RtApiDs :: ~RtApiDs()
{
  // Close any open stream first; COM teardown must come after, since
  // closing the stream may still use DirectSound/COM objects.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize(); // balanced call.
}
5615
// The DirectSound default output is always the first device.
// (DirectSound enumeration is assumed to list the primary/default
// device first — see the ordering produced by getDeviceCount.)
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0;
}
5621
// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0;
}
5628
getDeviceCount(void)5629 unsigned int RtApiDs :: getDeviceCount( void )
5630 {
5631 // Set query flag for previously found devices to false, so that we
5632 // can check for any devices that have disappeared.
5633 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5634 dsDevices[i].found = false;
5635
5636 // Query DirectSound devices.
5637 struct DsProbeData probeInfo;
5638 probeInfo.isInput = false;
5639 probeInfo.dsDevices = &dsDevices;
5640 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5641 if ( FAILED( result ) ) {
5642 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5643 errorText_ = errorStream_.str();
5644 error( RtAudioError::WARNING );
5645 }
5646
5647 // Query DirectSoundCapture devices.
5648 probeInfo.isInput = true;
5649 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5650 if ( FAILED( result ) ) {
5651 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5652 errorText_ = errorStream_.str();
5653 error( RtAudioError::WARNING );
5654 }
5655
5656 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5657 for ( unsigned int i=0; i<dsDevices.size(); ) {
5658 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5659 else i++;
5660 }
5661
5662 return static_cast<unsigned int>(dsDevices.size());
5663 }
5664
// Probes device 'device' for its capabilities (channels, sample rates,
// native formats).  Output capabilities are probed first; control then
// falls through (or jumps via 'probeInput') to the capture probe.  On any
// failure info.probed remains false.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false; // stays false on every early-error return path

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No output GUID for this device: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate up to and including 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // Output-only device: we are done — report success now.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // The WAVE_FORMAT_* capability bits encode (rate, channels, width)
  // combinations; stereo (S) bits are checked for >= 2 channels, mono (M)
  // bits for exactly 1 channel.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Collect the supported rates for the best available format (16-bit
    // preferred over 8-bit).
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Device 0 is the default input device (see getDefaultInputDevice).
  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5843
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)5844 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5845 unsigned int firstChannel, unsigned int sampleRate,
5846 RtAudioFormat format, unsigned int *bufferSize,
5847 RtAudio::StreamOptions *options )
5848 {
5849 if ( channels + firstChannel > 2 ) {
5850 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5851 return FAILURE;
5852 }
5853
5854 size_t nDevices = dsDevices.size();
5855 if ( nDevices == 0 ) {
5856 // This should not happen because a check is made before this function is called.
5857 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5858 return FAILURE;
5859 }
5860
5861 if ( device >= nDevices ) {
5862 // This should not happen because a check is made before this function is called.
5863 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5864 return FAILURE;
5865 }
5866
5867 if ( mode == OUTPUT ) {
5868 if ( dsDevices[ device ].validId[0] == false ) {
5869 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5870 errorText_ = errorStream_.str();
5871 return FAILURE;
5872 }
5873 }
5874 else { // mode == INPUT
5875 if ( dsDevices[ device ].validId[1] == false ) {
5876 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5877 errorText_ = errorStream_.str();
5878 return FAILURE;
5879 }
5880 }
5881
5882 // According to a note in PortAudio, using GetDesktopWindow()
5883 // instead of GetForegroundWindow() is supposed to avoid problems
5884 // that occur when the application's window is not the foreground
5885 // window. Also, if the application window closes before the
5886 // DirectSound buffer, DirectSound can crash. In the past, I had
5887 // problems when using GetDesktopWindow() but it seems fine now
5888 // (January 2010). I'll leave it commented here.
5889 // HWND hWnd = GetForegroundWindow();
5890 HWND hWnd = GetDesktopWindow();
5891
5892 // Check the numberOfBuffers parameter and limit the lowest value to
5893 // two. This is a judgement call and a value of two is probably too
5894 // low for capture, but it should work for playback.
5895 int nBuffers = 0;
5896 if ( options ) nBuffers = options->numberOfBuffers;
5897 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5898 if ( nBuffers < 2 ) nBuffers = 3;
5899
5900 // Check the lower range of the user-specified buffer size and set
5901 // (arbitrarily) to a lower bound of 32.
5902 if ( *bufferSize < 32 ) *bufferSize = 32;
5903
5904 // Create the wave format structure. The data format setting will
5905 // be determined later.
5906 WAVEFORMATEX waveFormat;
5907 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5908 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5909 waveFormat.nChannels = channels + firstChannel;
5910 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5911
5912 // Determine the device buffer size. By default, we'll use the value
5913 // defined above (32K), but we will grow it to make allowances for
5914 // very large software buffer sizes.
5915 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5916 DWORD dsPointerLeadTime = 0;
5917
5918 void *ohandle = 0, *bhandle = 0;
5919 HRESULT result;
5920 if ( mode == OUTPUT ) {
5921
5922 LPDIRECTSOUND output;
5923 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5924 if ( FAILED( result ) ) {
5925 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5926 errorText_ = errorStream_.str();
5927 return FAILURE;
5928 }
5929
5930 DSCAPS outCaps;
5931 outCaps.dwSize = sizeof( outCaps );
5932 result = output->GetCaps( &outCaps );
5933 if ( FAILED( result ) ) {
5934 output->Release();
5935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5936 errorText_ = errorStream_.str();
5937 return FAILURE;
5938 }
5939
5940 // Check channel information.
5941 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5942 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5943 errorText_ = errorStream_.str();
5944 return FAILURE;
5945 }
5946
5947 // Check format information. Use 16-bit format unless not
5948 // supported or user requests 8-bit.
5949 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5950 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5951 waveFormat.wBitsPerSample = 16;
5952 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5953 }
5954 else {
5955 waveFormat.wBitsPerSample = 8;
5956 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5957 }
5958 stream_.userFormat = format;
5959
5960 // Update wave format structure and buffer information.
5961 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5962 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5963 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5964
5965 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5966 while ( dsPointerLeadTime * 2U > dsBufferSize )
5967 dsBufferSize *= 2;
5968
5969 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5970 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5971 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5972 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5973 if ( FAILED( result ) ) {
5974 output->Release();
5975 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5976 errorText_ = errorStream_.str();
5977 return FAILURE;
5978 }
5979
5980 // Even though we will write to the secondary buffer, we need to
5981 // access the primary buffer to set the correct output format
5982 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5983 // buffer description.
5984 DSBUFFERDESC bufferDescription;
5985 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5986 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5987 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5988
5989 // Obtain the primary buffer
5990 LPDIRECTSOUNDBUFFER buffer;
5991 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5992 if ( FAILED( result ) ) {
5993 output->Release();
5994 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5995 errorText_ = errorStream_.str();
5996 return FAILURE;
5997 }
5998
5999 // Set the primary DS buffer sound format.
6000 result = buffer->SetFormat( &waveFormat );
6001 if ( FAILED( result ) ) {
6002 output->Release();
6003 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6004 errorText_ = errorStream_.str();
6005 return FAILURE;
6006 }
6007
6008 // Setup the secondary DS buffer description.
6009 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6010 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6011 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6012 DSBCAPS_GLOBALFOCUS |
6013 DSBCAPS_GETCURRENTPOSITION2 |
6014 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6015 bufferDescription.dwBufferBytes = dsBufferSize;
6016 bufferDescription.lpwfxFormat = &waveFormat;
6017
6018 // Try to create the secondary DS buffer. If that doesn't work,
6019 // try to use software mixing. Otherwise, there's a problem.
6020 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6021 if ( FAILED( result ) ) {
6022 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6023 DSBCAPS_GLOBALFOCUS |
6024 DSBCAPS_GETCURRENTPOSITION2 |
6025 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6026 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6027 if ( FAILED( result ) ) {
6028 output->Release();
6029 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6030 errorText_ = errorStream_.str();
6031 return FAILURE;
6032 }
6033 }
6034
6035 // Get the buffer size ... might be different from what we specified.
6036 DSBCAPS dsbcaps;
6037 dsbcaps.dwSize = sizeof( DSBCAPS );
6038 result = buffer->GetCaps( &dsbcaps );
6039 if ( FAILED( result ) ) {
6040 output->Release();
6041 buffer->Release();
6042 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6043 errorText_ = errorStream_.str();
6044 return FAILURE;
6045 }
6046
6047 dsBufferSize = dsbcaps.dwBufferBytes;
6048
6049 // Lock the DS buffer
6050 LPVOID audioPtr;
6051 DWORD dataLen;
6052 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6053 if ( FAILED( result ) ) {
6054 output->Release();
6055 buffer->Release();
6056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6057 errorText_ = errorStream_.str();
6058 return FAILURE;
6059 }
6060
6061 // Zero the DS buffer
6062 ZeroMemory( audioPtr, dataLen );
6063
6064 // Unlock the DS buffer
6065 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6066 if ( FAILED( result ) ) {
6067 output->Release();
6068 buffer->Release();
6069 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6070 errorText_ = errorStream_.str();
6071 return FAILURE;
6072 }
6073
6074 ohandle = (void *) output;
6075 bhandle = (void *) buffer;
6076 }
6077
6078 if ( mode == INPUT ) {
6079
6080 LPDIRECTSOUNDCAPTURE input;
6081 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6082 if ( FAILED( result ) ) {
6083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6084 errorText_ = errorStream_.str();
6085 return FAILURE;
6086 }
6087
6088 DSCCAPS inCaps;
6089 inCaps.dwSize = sizeof( inCaps );
6090 result = input->GetCaps( &inCaps );
6091 if ( FAILED( result ) ) {
6092 input->Release();
6093 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6094 errorText_ = errorStream_.str();
6095 return FAILURE;
6096 }
6097
6098 // Check channel information.
6099 if ( inCaps.dwChannels < channels + firstChannel ) {
6100 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6101 return FAILURE;
6102 }
6103
6104 // Check format information. Use 16-bit format unless user
6105 // requests 8-bit.
6106 DWORD deviceFormats;
6107 if ( channels + firstChannel == 2 ) {
6108 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6109 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6110 waveFormat.wBitsPerSample = 8;
6111 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6112 }
6113 else { // assume 16-bit is supported
6114 waveFormat.wBitsPerSample = 16;
6115 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6116 }
6117 }
6118 else { // channel == 1
6119 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6120 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6121 waveFormat.wBitsPerSample = 8;
6122 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6123 }
6124 else { // assume 16-bit is supported
6125 waveFormat.wBitsPerSample = 16;
6126 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6127 }
6128 }
6129 stream_.userFormat = format;
6130
6131 // Update wave format structure and buffer information.
6132 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6133 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6134 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6135
6136 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6137 while ( dsPointerLeadTime * 2U > dsBufferSize )
6138 dsBufferSize *= 2;
6139
6140 // Setup the secondary DS buffer description.
6141 DSCBUFFERDESC bufferDescription;
6142 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6143 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6144 bufferDescription.dwFlags = 0;
6145 bufferDescription.dwReserved = 0;
6146 bufferDescription.dwBufferBytes = dsBufferSize;
6147 bufferDescription.lpwfxFormat = &waveFormat;
6148
6149 // Create the capture buffer.
6150 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6151 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6152 if ( FAILED( result ) ) {
6153 input->Release();
6154 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6155 errorText_ = errorStream_.str();
6156 return FAILURE;
6157 }
6158
6159 // Get the buffer size ... might be different from what we specified.
6160 DSCBCAPS dscbcaps;
6161 dscbcaps.dwSize = sizeof( DSCBCAPS );
6162 result = buffer->GetCaps( &dscbcaps );
6163 if ( FAILED( result ) ) {
6164 input->Release();
6165 buffer->Release();
6166 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6167 errorText_ = errorStream_.str();
6168 return FAILURE;
6169 }
6170
6171 dsBufferSize = dscbcaps.dwBufferBytes;
6172
6173 // NOTE: We could have a problem here if this is a duplex stream
6174 // and the play and capture hardware buffer sizes are different
6175 // (I'm actually not sure if that is a problem or not).
6176 // Currently, we are not verifying that.
6177
6178 // Lock the capture buffer
6179 LPVOID audioPtr;
6180 DWORD dataLen;
6181 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6182 if ( FAILED( result ) ) {
6183 input->Release();
6184 buffer->Release();
6185 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6186 errorText_ = errorStream_.str();
6187 return FAILURE;
6188 }
6189
6190 // Zero the buffer
6191 ZeroMemory( audioPtr, dataLen );
6192
6193 // Unlock the buffer
6194 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6195 if ( FAILED( result ) ) {
6196 input->Release();
6197 buffer->Release();
6198 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6199 errorText_ = errorStream_.str();
6200 return FAILURE;
6201 }
6202
6203 ohandle = (void *) input;
6204 bhandle = (void *) buffer;
6205 }
6206
6207 // Set various stream parameters
6208 DsHandle *handle = 0;
6209 stream_.nDeviceChannels[mode] = channels + firstChannel;
6210 stream_.nUserChannels[mode] = channels;
6211 stream_.bufferSize = *bufferSize;
6212 stream_.channelOffset[mode] = firstChannel;
6213 stream_.deviceInterleaved[mode] = true;
6214 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6215 else stream_.userInterleaved = true;
6216
6217 // Set flag for buffer conversion
6218 stream_.doConvertBuffer[mode] = false;
6219 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6220 stream_.doConvertBuffer[mode] = true;
6221 if (stream_.userFormat != stream_.deviceFormat[mode])
6222 stream_.doConvertBuffer[mode] = true;
6223 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6224 stream_.nUserChannels[mode] > 1 )
6225 stream_.doConvertBuffer[mode] = true;
6226
6227 // Allocate necessary internal buffers
6228 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6229 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6230 if ( stream_.userBuffer[mode] == NULL ) {
6231 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6232 goto error;
6233 }
6234
6235 if ( stream_.doConvertBuffer[mode] ) {
6236
6237 bool makeBuffer = true;
6238 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6239 if ( mode == INPUT ) {
6240 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6241 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6242 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6243 }
6244 }
6245
6246 if ( makeBuffer ) {
6247 bufferBytes *= *bufferSize;
6248 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6249 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6250 if ( stream_.deviceBuffer == NULL ) {
6251 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6252 goto error;
6253 }
6254 }
6255 }
6256
6257 // Allocate our DsHandle structures for the stream.
6258 if ( stream_.apiHandle == 0 ) {
6259 try {
6260 handle = new DsHandle;
6261 }
6262 catch ( std::bad_alloc& ) {
6263 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6264 goto error;
6265 }
6266
6267 // Create a manual-reset event.
6268 handle->condition = CreateEvent( NULL, // no security
6269 TRUE, // manual-reset
6270 FALSE, // non-signaled initially
6271 NULL ); // unnamed
6272 stream_.apiHandle = (void *) handle;
6273 }
6274 else
6275 handle = (DsHandle *) stream_.apiHandle;
6276 handle->id[mode] = ohandle;
6277 handle->buffer[mode] = bhandle;
6278 handle->dsBufferSize[mode] = dsBufferSize;
6279 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6280
6281 stream_.device[mode] = device;
6282 stream_.state = STREAM_STOPPED;
6283 if ( stream_.mode == OUTPUT && mode == INPUT )
6284 // We had already set up an output stream.
6285 stream_.mode = DUPLEX;
6286 else
6287 stream_.mode = mode;
6288 stream_.nBuffers = nBuffers;
6289 stream_.sampleRate = sampleRate;
6290
6291 // Setup the buffer conversion information structure.
6292 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6293
6294 // Setup the callback thread.
6295 if ( stream_.callbackInfo.isRunning == false ) {
6296 unsigned threadId;
6297 stream_.callbackInfo.isRunning = true;
6298 stream_.callbackInfo.object = (void *) this;
6299 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6300 &stream_.callbackInfo, 0, &threadId );
6301 if ( stream_.callbackInfo.thread == 0 ) {
6302 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6303 goto error;
6304 }
6305
6306 // Boost DS thread priority
6307 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6308 }
6309 return SUCCESS;
6310
6311 error:
6312 if ( handle ) {
6313 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6314 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6315 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6316 if ( buffer ) buffer->Release();
6317 object->Release();
6318 }
6319 if ( handle->buffer[1] ) {
6320 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6321 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6322 if ( buffer ) buffer->Release();
6323 object->Release();
6324 }
6325 CloseHandle( handle->condition );
6326 delete handle;
6327 stream_.apiHandle = 0;
6328 }
6329
6330 for ( int i=0; i<2; i++ ) {
6331 if ( stream_.userBuffer[i] ) {
6332 free( stream_.userBuffer[i] );
6333 stream_.userBuffer[i] = 0;
6334 }
6335 }
6336
6337 if ( stream_.deviceBuffer ) {
6338 free( stream_.deviceBuffer );
6339 stream_.deviceBuffer = 0;
6340 }
6341
6342 stream_.state = STREAM_CLOSED;
6343 return FAILURE;
6344 }
6345
closeStream()6346 void RtApiDs :: closeStream()
6347 {
6348 if ( stream_.state == STREAM_CLOSED ) {
6349 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6350 error( RtAudioError::WARNING );
6351 return;
6352 }
6353
6354 // Stop the callback thread.
6355 stream_.callbackInfo.isRunning = false;
6356 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6357 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6358
6359 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6360 if ( handle ) {
6361 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6362 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6363 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6364 if ( buffer ) {
6365 buffer->Stop();
6366 buffer->Release();
6367 }
6368 object->Release();
6369 }
6370 if ( handle->buffer[1] ) {
6371 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6372 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6373 if ( buffer ) {
6374 buffer->Stop();
6375 buffer->Release();
6376 }
6377 object->Release();
6378 }
6379 CloseHandle( handle->condition );
6380 delete handle;
6381 stream_.apiHandle = 0;
6382 }
6383
6384 for ( int i=0; i<2; i++ ) {
6385 if ( stream_.userBuffer[i] ) {
6386 free( stream_.userBuffer[i] );
6387 stream_.userBuffer[i] = 0;
6388 }
6389 }
6390
6391 if ( stream_.deviceBuffer ) {
6392 free( stream_.deviceBuffer );
6393 stream_.deviceBuffer = 0;
6394 }
6395
6396 stream_.mode = UNINITIALIZED;
6397 stream_.state = STREAM_CLOSED;
6398 }
6399
startStream()6400 void RtApiDs :: startStream()
6401 {
6402 verifyStream();
6403 if ( stream_.state == STREAM_RUNNING ) {
6404 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6405 error( RtAudioError::WARNING );
6406 return;
6407 }
6408
6409 #if defined( HAVE_GETTIMEOFDAY )
6410 gettimeofday( &stream_.lastTickTimestamp, NULL );
6411 #endif
6412
6413 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6414
6415 // Increase scheduler frequency on lesser windows (a side-effect of
6416 // increasing timer accuracy). On greater windows (Win2K or later),
6417 // this is already in effect.
6418 timeBeginPeriod( 1 );
6419
6420 buffersRolling = false;
6421 duplexPrerollBytes = 0;
6422
6423 if ( stream_.mode == DUPLEX ) {
6424 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6425 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6426 }
6427
6428 HRESULT result = 0;
6429 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6430
6431 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6432 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6433 if ( FAILED( result ) ) {
6434 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6435 errorText_ = errorStream_.str();
6436 goto unlock;
6437 }
6438 }
6439
6440 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6441
6442 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6443 result = buffer->Start( DSCBSTART_LOOPING );
6444 if ( FAILED( result ) ) {
6445 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6446 errorText_ = errorStream_.str();
6447 goto unlock;
6448 }
6449 }
6450
6451 handle->drainCounter = 0;
6452 handle->internalDrain = false;
6453 ResetEvent( handle->condition );
6454 stream_.state = STREAM_RUNNING;
6455
6456 unlock:
6457 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6458 }
6459
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // If a drain is not already in progress, request one and wait for
    // the callback thread to signal that output has finished.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    // The mutex is released only at the "unlock" label below; every
    // failure path in this branch must goto unlock, not return.
    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the output branch above already holds the mutex,
    // so only acquire it here for an INPUT-only stream.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  // Undo the timeBeginPeriod( 1 ) made in startStream(), then release
  // the stream mutex acquired above.
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6563
abortStream()6564 void RtApiDs :: abortStream()
6565 {
6566 verifyStream();
6567 if ( stream_.state == STREAM_STOPPED ) {
6568 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6569 error( RtAudioError::WARNING );
6570 return;
6571 }
6572
6573 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6574 handle->drainCounter = 2;
6575
6576 stopStream();
6577 }
6578
callbackEvent()6579 void RtApiDs :: callbackEvent()
6580 {
6581 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6582 Sleep( 50 ); // sleep 50 milliseconds
6583 return;
6584 }
6585
6586 if ( stream_.state == STREAM_CLOSED ) {
6587 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6588 error( RtAudioError::WARNING );
6589 return;
6590 }
6591
6592 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6593 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6594
6595 // Check if we were draining the stream and signal is finished.
6596 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6597
6598 stream_.state = STREAM_STOPPING;
6599 if ( handle->internalDrain == false )
6600 SetEvent( handle->condition );
6601 else
6602 stopStream();
6603 return;
6604 }
6605
6606 // Invoke user callback to get fresh output data UNLESS we are
6607 // draining stream.
6608 if ( handle->drainCounter == 0 ) {
6609 RtAudioCallback callback = (RtAudioCallback) info->callback;
6610 double streamTime = getStreamTime();
6611 RtAudioStreamStatus status = 0;
6612 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6613 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6614 handle->xrun[0] = false;
6615 }
6616 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6617 status |= RTAUDIO_INPUT_OVERFLOW;
6618 handle->xrun[1] = false;
6619 }
6620 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6621 stream_.bufferSize, streamTime, status, info->userData );
6622 if ( cbReturnValue == 2 ) {
6623 stream_.state = STREAM_STOPPING;
6624 handle->drainCounter = 2;
6625 abortStream();
6626 return;
6627 }
6628 else if ( cbReturnValue == 1 ) {
6629 handle->drainCounter = 1;
6630 handle->internalDrain = true;
6631 }
6632 }
6633
6634 HRESULT result;
6635 DWORD currentWritePointer, safeWritePointer;
6636 DWORD currentReadPointer, safeReadPointer;
6637 UINT nextWritePointer;
6638
6639 LPVOID buffer1 = NULL;
6640 LPVOID buffer2 = NULL;
6641 DWORD bufferSize1 = 0;
6642 DWORD bufferSize2 = 0;
6643
6644 char *buffer;
6645 long bufferBytes;
6646
6647 MUTEX_LOCK( &stream_.mutex );
6648 if ( stream_.state == STREAM_STOPPED ) {
6649 MUTEX_UNLOCK( &stream_.mutex );
6650 return;
6651 }
6652
6653 if ( buffersRolling == false ) {
6654 if ( stream_.mode == DUPLEX ) {
6655 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6656
6657 // It takes a while for the devices to get rolling. As a result,
6658 // there's no guarantee that the capture and write device pointers
6659 // will move in lockstep. Wait here for both devices to start
6660 // rolling, and then set our buffer pointers accordingly.
6661 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6662 // bytes later than the write buffer.
6663
6664 // Stub: a serious risk of having a pre-emptive scheduling round
6665 // take place between the two GetCurrentPosition calls... but I'm
6666 // really not sure how to solve the problem. Temporarily boost to
6667 // Realtime priority, maybe; but I'm not sure what priority the
6668 // DirectSound service threads run at. We *should* be roughly
6669 // within a ms or so of correct.
6670
6671 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6672 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6673
6674 DWORD startSafeWritePointer, startSafeReadPointer;
6675
6676 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6677 if ( FAILED( result ) ) {
6678 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6679 errorText_ = errorStream_.str();
6680 MUTEX_UNLOCK( &stream_.mutex );
6681 error( RtAudioError::SYSTEM_ERROR );
6682 return;
6683 }
6684 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6685 if ( FAILED( result ) ) {
6686 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6687 errorText_ = errorStream_.str();
6688 MUTEX_UNLOCK( &stream_.mutex );
6689 error( RtAudioError::SYSTEM_ERROR );
6690 return;
6691 }
6692 while ( true ) {
6693 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6694 if ( FAILED( result ) ) {
6695 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6696 errorText_ = errorStream_.str();
6697 MUTEX_UNLOCK( &stream_.mutex );
6698 error( RtAudioError::SYSTEM_ERROR );
6699 return;
6700 }
6701 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6702 if ( FAILED( result ) ) {
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6704 errorText_ = errorStream_.str();
6705 MUTEX_UNLOCK( &stream_.mutex );
6706 error( RtAudioError::SYSTEM_ERROR );
6707 return;
6708 }
6709 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6710 Sleep( 1 );
6711 }
6712
6713 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6714
6715 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6716 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6717 handle->bufferPointer[1] = safeReadPointer;
6718 }
6719 else if ( stream_.mode == OUTPUT ) {
6720
6721 // Set the proper nextWritePosition after initial startup.
6722 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
      result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6724 if ( FAILED( result ) ) {
6725 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6726 errorText_ = errorStream_.str();
6727 MUTEX_UNLOCK( &stream_.mutex );
6728 error( RtAudioError::SYSTEM_ERROR );
6729 return;
6730 }
6731 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6732 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6733 }
6734
6735 buffersRolling = true;
6736 }
6737
6738 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6739
6740 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6741
6742 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6743 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6744 bufferBytes *= formatBytes( stream_.userFormat );
6745 memset( stream_.userBuffer[0], 0, bufferBytes );
6746 }
6747
6748 // Setup parameters and do buffer conversion if necessary.
6749 if ( stream_.doConvertBuffer[0] ) {
6750 buffer = stream_.deviceBuffer;
6751 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6752 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6753 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6754 }
6755 else {
6756 buffer = stream_.userBuffer[0];
6757 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6758 bufferBytes *= formatBytes( stream_.userFormat );
6759 }
6760
6761 // No byte swapping necessary in DirectSound implementation.
6762
6763 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6764 // unsigned. So, we need to convert our signed 8-bit data here to
6765 // unsigned.
6766 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6767 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6768
6769 DWORD dsBufferSize = handle->dsBufferSize[0];
6770 nextWritePointer = handle->bufferPointer[0];
6771
6772 DWORD endWrite, leadPointer;
6773 while ( true ) {
6774 // Find out where the read and "safe write" pointers are.
      result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6776 if ( FAILED( result ) ) {
6777 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6778 errorText_ = errorStream_.str();
6779 MUTEX_UNLOCK( &stream_.mutex );
6780 error( RtAudioError::SYSTEM_ERROR );
6781 return;
6782 }
6783
6784 // We will copy our output buffer into the region between
6785 // safeWritePointer and leadPointer. If leadPointer is not
6786 // beyond the next endWrite position, wait until it is.
6787 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6788 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6789 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6790 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6791 endWrite = nextWritePointer + bufferBytes;
6792
6793 // Check whether the entire write region is behind the play pointer.
6794 if ( leadPointer >= endWrite ) break;
6795
6796 // If we are here, then we must wait until the leadPointer advances
6797 // beyond the end of our next write region. We use the
6798 // Sleep() function to suspend operation until that happens.
6799 double millis = ( endWrite - leadPointer ) * 1000.0;
6800 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6801 if ( millis < 1.0 ) millis = 1.0;
6802 Sleep( (DWORD) millis );
6803 }
6804
6805 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6806 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6807 // We've strayed into the forbidden zone ... resync the read pointer.
6808 handle->xrun[0] = true;
6809 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6810 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6811 handle->bufferPointer[0] = nextWritePointer;
6812 endWrite = nextWritePointer + bufferBytes;
6813 }
6814
6815 // Lock free space in the buffer
6816 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6817 &bufferSize1, &buffer2, &bufferSize2, 0 );
6818 if ( FAILED( result ) ) {
6819 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6820 errorText_ = errorStream_.str();
6821 MUTEX_UNLOCK( &stream_.mutex );
6822 error( RtAudioError::SYSTEM_ERROR );
6823 return;
6824 }
6825
6826 // Copy our buffer into the DS buffer
6827 CopyMemory( buffer1, buffer, bufferSize1 );
6828 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6829
6830 // Update our buffer offset and unlock sound buffer
6831 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6832 if ( FAILED( result ) ) {
6833 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6834 errorText_ = errorStream_.str();
6835 MUTEX_UNLOCK( &stream_.mutex );
6836 error( RtAudioError::SYSTEM_ERROR );
6837 return;
6838 }
6839 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6840 handle->bufferPointer[0] = nextWritePointer;
6841 }
6842
6843 // Don't bother draining input
6844 if ( handle->drainCounter ) {
6845 handle->drainCounter++;
6846 goto unlock;
6847 }
6848
6849 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6850
6851 // Setup parameters.
6852 if ( stream_.doConvertBuffer[1] ) {
6853 buffer = stream_.deviceBuffer;
6854 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6855 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6856 }
6857 else {
6858 buffer = stream_.userBuffer[1];
6859 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6860 bufferBytes *= formatBytes( stream_.userFormat );
6861 }
6862
6863 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6864 long nextReadPointer = handle->bufferPointer[1];
6865 DWORD dsBufferSize = handle->dsBufferSize[1];
6866
6867 // Find out where the write and "safe read" pointers are.
6868 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6869 if ( FAILED( result ) ) {
6870 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6871 errorText_ = errorStream_.str();
6872 MUTEX_UNLOCK( &stream_.mutex );
6873 error( RtAudioError::SYSTEM_ERROR );
6874 return;
6875 }
6876
6877 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6878 DWORD endRead = nextReadPointer + bufferBytes;
6879
6880 // Handling depends on whether we are INPUT or DUPLEX.
6881 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6882 // then a wait here will drag the write pointers into the forbidden zone.
6883 //
6884 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6885 // it's in a safe position. This causes dropouts, but it seems to be the only
6886 // practical way to sync up the read and write pointers reliably, given the
6887 // the very complex relationship between phase and increment of the read and write
6888 // pointers.
6889 //
6890 // In order to minimize audible dropouts in DUPLEX mode, we will
6891 // provide a pre-roll period of 0.5 seconds in which we return
6892 // zeros from the read buffer while the pointers sync up.
6893
6894 if ( stream_.mode == DUPLEX ) {
6895 if ( safeReadPointer < endRead ) {
6896 if ( duplexPrerollBytes <= 0 ) {
6897 // Pre-roll time over. Be more agressive.
6898 int adjustment = endRead-safeReadPointer;
6899
6900 handle->xrun[1] = true;
6901 // Two cases:
6902 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6903 // and perform fine adjustments later.
6904 // - small adjustments: back off by twice as much.
6905 if ( adjustment >= 2*bufferBytes )
6906 nextReadPointer = safeReadPointer-2*bufferBytes;
6907 else
6908 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6909
6910 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6911
6912 }
6913 else {
6914 // In pre=roll time. Just do it.
6915 nextReadPointer = safeReadPointer - bufferBytes;
6916 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6917 }
6918 endRead = nextReadPointer + bufferBytes;
6919 }
6920 }
6921 else { // mode == INPUT
6922 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6923 // See comments for playback.
6924 double millis = (endRead - safeReadPointer) * 1000.0;
6925 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6926 if ( millis < 1.0 ) millis = 1.0;
6927 Sleep( (DWORD) millis );
6928
6929 // Wake up and find out where we are now.
6930 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6931 if ( FAILED( result ) ) {
6932 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6933 errorText_ = errorStream_.str();
6934 MUTEX_UNLOCK( &stream_.mutex );
6935 error( RtAudioError::SYSTEM_ERROR );
6936 return;
6937 }
6938
6939 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6940 }
6941 }
6942
6943 // Lock free space in the buffer
6944 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6945 &bufferSize1, &buffer2, &bufferSize2, 0 );
6946 if ( FAILED( result ) ) {
6947 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6948 errorText_ = errorStream_.str();
6949 MUTEX_UNLOCK( &stream_.mutex );
6950 error( RtAudioError::SYSTEM_ERROR );
6951 return;
6952 }
6953
6954 if ( duplexPrerollBytes <= 0 ) {
6955 // Copy our buffer into the DS buffer
6956 CopyMemory( buffer, buffer1, bufferSize1 );
6957 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6958 }
6959 else {
6960 memset( buffer, 0, bufferSize1 );
6961 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6962 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6963 }
6964
6965 // Update our buffer offset and unlock sound buffer
6966 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6967 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6968 if ( FAILED( result ) ) {
6969 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6970 errorText_ = errorStream_.str();
6971 MUTEX_UNLOCK( &stream_.mutex );
6972 error( RtAudioError::SYSTEM_ERROR );
6973 return;
6974 }
6975 handle->bufferPointer[1] = nextReadPointer;
6976
6977 // No byte swapping necessary in DirectSound implementation.
6978
6979 // If necessary, convert 8-bit data from unsigned to signed.
6980 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6981 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6982
6983 // Do buffer conversion if necessary.
6984 if ( stream_.doConvertBuffer[1] )
6985 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6986 }
6987
6988 unlock:
6989 MUTEX_UNLOCK( &stream_.mutex );
6990 RtApi::tickStreamTime();
6991 }
6992
6993 // Definitions for utility functions and callbacks
6994 // specific to the DirectSound implementation.
6995
callbackHandler(void * ptr)6996 static unsigned __stdcall callbackHandler( void *ptr )
6997 {
6998 CallbackInfo *info = (CallbackInfo *) ptr;
6999 RtApiDs *object = (RtApiDs *) info->object;
7000 bool* isRunning = &info->isRunning;
7001
7002 while ( *isRunning == true ) {
7003 object->callbackEvent();
7004 }
7005
7006 _endthreadex( 0 );
7007 return 0;
7008 }
7009
deviceQueryCallback(LPGUID lpguid,LPCTSTR description,LPCTSTR,LPVOID lpContext)7010 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7011 LPCTSTR description,
7012 LPCTSTR /*module*/,
7013 LPVOID lpContext )
7014 {
7015 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7016 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7017
7018 HRESULT hr;
7019 bool validDevice = false;
7020 if ( probeInfo.isInput == true ) {
7021 DSCCAPS caps;
7022 LPDIRECTSOUNDCAPTURE object;
7023
7024 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7025 if ( hr != DS_OK ) return TRUE;
7026
7027 caps.dwSize = sizeof(caps);
7028 hr = object->GetCaps( &caps );
7029 if ( hr == DS_OK ) {
7030 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7031 validDevice = true;
7032 }
7033 object->Release();
7034 }
7035 else {
7036 DSCAPS caps;
7037 LPDIRECTSOUND object;
7038 hr = DirectSoundCreate( lpguid, &object, NULL );
7039 if ( hr != DS_OK ) return TRUE;
7040
7041 caps.dwSize = sizeof(caps);
7042 hr = object->GetCaps( &caps );
7043 if ( hr == DS_OK ) {
7044 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7045 validDevice = true;
7046 }
7047 object->Release();
7048 }
7049
7050 // If good device, then save its name and guid.
7051 std::string name = convertCharPointerToStdString( description );
7052 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7053 if ( lpguid == NULL )
7054 name = "Default Device";
7055 if ( validDevice ) {
7056 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7057 if ( dsDevices[i].name == name ) {
7058 dsDevices[i].found = true;
7059 if ( probeInfo.isInput ) {
7060 dsDevices[i].id[1] = lpguid;
7061 dsDevices[i].validId[1] = true;
7062 }
7063 else {
7064 dsDevices[i].id[0] = lpguid;
7065 dsDevices[i].validId[0] = true;
7066 }
7067 return TRUE;
7068 }
7069 }
7070
7071 DsDevice device;
7072 device.name = name;
7073 device.found = true;
7074 if ( probeInfo.isInput ) {
7075 device.id[1] = lpguid;
7076 device.validId[1] = true;
7077 }
7078 else {
7079 device.id[0] = lpguid;
7080 device.validId[0] = true;
7081 }
7082 dsDevices.push_back( device );
7083 }
7084
7085 return TRUE;
7086 }
7087
getErrorString(int code)7088 static const char* getErrorString( int code )
7089 {
7090 switch ( code ) {
7091
7092 case DSERR_ALLOCATED:
7093 return "Already allocated";
7094
7095 case DSERR_CONTROLUNAVAIL:
7096 return "Control unavailable";
7097
7098 case DSERR_INVALIDPARAM:
7099 return "Invalid parameter";
7100
7101 case DSERR_INVALIDCALL:
7102 return "Invalid call";
7103
7104 case DSERR_GENERIC:
7105 return "Generic error";
7106
7107 case DSERR_PRIOLEVELNEEDED:
7108 return "Priority level needed";
7109
7110 case DSERR_OUTOFMEMORY:
7111 return "Out of memory";
7112
7113 case DSERR_BADFORMAT:
7114 return "The sample rate or the channel format is not supported";
7115
7116 case DSERR_UNSUPPORTED:
7117 return "Not supported";
7118
7119 case DSERR_NODRIVER:
7120 return "No driver";
7121
7122 case DSERR_ALREADYINITIALIZED:
7123 return "Already initialized";
7124
7125 case DSERR_NOAGGREGATION:
7126 return "No aggregation";
7127
7128 case DSERR_BUFFERLOST:
7129 return "Buffer lost";
7130
7131 case DSERR_OTHERAPPHASPRIO:
7132 return "Another application already has priority";
7133
7134 case DSERR_UNINITIALIZED:
7135 return "Uninitialized";
7136
7137 default:
7138 return "DirectSound unknown error";
7139 }
7140 }
7141 //******************** End of __WINDOWS_DS__ *********************//
7142 #endif
7143
7144
7145 #if defined(__LINUX_ALSA__)
7146
7147 #include <alsa/asoundlib.h>
7148 #include <unistd.h>
7149
7150 // A structure to hold various information related to the ALSA API
7151 // implementation.
7152 struct AlsaHandle {
7153 snd_pcm_t *handles[2];
7154 bool synchronized;
7155 bool xrun[2];
7156 pthread_cond_t runnable_cv;
7157 bool runnable;
7158
AlsaHandleAlsaHandle7159 AlsaHandle()
7160 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7161 };
7162
7163 static void *alsaCallbackHandler( void * ptr );
7164
RtApiAlsa()7165 RtApiAlsa :: RtApiAlsa()
7166 {
7167 // Nothing to do here.
7168 }
7169
~RtApiAlsa()7170 RtApiAlsa :: ~RtApiAlsa()
7171 {
7172 if ( stream_.state != STREAM_CLOSED ) closeStream();
7173 }
7174
getDeviceCount(void)7175 unsigned int RtApiAlsa :: getDeviceCount( void )
7176 {
7177 unsigned nDevices = 0;
7178 int result, subdevice, card;
7179 char name[64];
7180 snd_ctl_t *handle = 0;
7181
7182 // Count cards and devices
7183 card = -1;
7184 snd_card_next( &card );
7185 while ( card >= 0 ) {
7186 sprintf( name, "hw:%d", card );
7187 result = snd_ctl_open( &handle, name, 0 );
7188 if ( result < 0 ) {
7189 handle = 0;
7190 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7191 errorText_ = errorStream_.str();
7192 error( RtAudioError::WARNING );
7193 goto nextcard;
7194 }
7195 subdevice = -1;
7196 while( 1 ) {
7197 result = snd_ctl_pcm_next_device( handle, &subdevice );
7198 if ( result < 0 ) {
7199 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7200 errorText_ = errorStream_.str();
7201 error( RtAudioError::WARNING );
7202 break;
7203 }
7204 if ( subdevice < 0 )
7205 break;
7206 nDevices++;
7207 }
7208 nextcard:
7209 if ( handle )
7210 snd_ctl_close( handle );
7211 snd_card_next( &card );
7212 }
7213
7214 result = snd_ctl_open( &handle, "default", 0 );
7215 if (result == 0) {
7216 nDevices++;
7217 snd_ctl_close( handle );
7218 }
7219
7220 return nDevices;
7221 }
7222
getDeviceInfo(unsigned int device)7223 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7224 {
7225 RtAudio::DeviceInfo info;
7226 info.probed = false;
7227
7228 unsigned nDevices = 0;
7229 int result, subdevice, card;
7230 char name[64];
7231 snd_ctl_t *chandle = 0;
7232
7233 // Count cards and devices
7234 card = -1;
7235 subdevice = -1;
7236 snd_card_next( &card );
7237 while ( card >= 0 ) {
7238 sprintf( name, "hw:%d", card );
7239 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7240 if ( result < 0 ) {
7241 chandle = 0;
7242 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7243 errorText_ = errorStream_.str();
7244 error( RtAudioError::WARNING );
7245 goto nextcard;
7246 }
7247 subdevice = -1;
7248 while( 1 ) {
7249 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7250 if ( result < 0 ) {
7251 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7252 errorText_ = errorStream_.str();
7253 error( RtAudioError::WARNING );
7254 break;
7255 }
7256 if ( subdevice < 0 ) break;
7257 if ( nDevices == device ) {
7258 sprintf( name, "hw:%d,%d", card, subdevice );
7259 goto foundDevice;
7260 }
7261 nDevices++;
7262 }
7263 nextcard:
7264 if ( chandle )
7265 snd_ctl_close( chandle );
7266 snd_card_next( &card );
7267 }
7268
7269 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7270 if ( result == 0 ) {
7271 if ( nDevices == device ) {
7272 strcpy( name, "default" );
7273 goto foundDevice;
7274 }
7275 nDevices++;
7276 }
7277
7278 if ( nDevices == 0 ) {
7279 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7280 error( RtAudioError::INVALID_USE );
7281 return info;
7282 }
7283
7284 if ( device >= nDevices ) {
7285 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7286 error( RtAudioError::INVALID_USE );
7287 return info;
7288 }
7289
7290 foundDevice:
7291
7292 // If a stream is already open, we cannot probe the stream devices.
7293 // Thus, use the saved results.
7294 if ( stream_.state != STREAM_CLOSED &&
7295 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7296 snd_ctl_close( chandle );
7297 if ( device >= devices_.size() ) {
7298 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7299 error( RtAudioError::WARNING );
7300 return info;
7301 }
7302 return devices_[ device ];
7303 }
7304
7305 int openMode = SND_PCM_ASYNC;
7306 snd_pcm_stream_t stream;
7307 snd_pcm_info_t *pcminfo;
7308 snd_pcm_info_alloca( &pcminfo );
7309 snd_pcm_t *phandle;
7310 snd_pcm_hw_params_t *params;
7311 snd_pcm_hw_params_alloca( ¶ms );
7312
7313 // First try for playback unless default device (which has subdev -1)
7314 stream = SND_PCM_STREAM_PLAYBACK;
7315 snd_pcm_info_set_stream( pcminfo, stream );
7316 if ( subdevice != -1 ) {
7317 snd_pcm_info_set_device( pcminfo, subdevice );
7318 snd_pcm_info_set_subdevice( pcminfo, 0 );
7319
7320 result = snd_ctl_pcm_info( chandle, pcminfo );
7321 if ( result < 0 ) {
7322 // Device probably doesn't support playback.
7323 goto captureProbe;
7324 }
7325 }
7326
7327 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7328 if ( result < 0 ) {
7329 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7330 errorText_ = errorStream_.str();
7331 error( RtAudioError::WARNING );
7332 goto captureProbe;
7333 }
7334
7335 // The device is open ... fill the parameter structure.
7336 result = snd_pcm_hw_params_any( phandle, params );
7337 if ( result < 0 ) {
7338 snd_pcm_close( phandle );
7339 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7340 errorText_ = errorStream_.str();
7341 error( RtAudioError::WARNING );
7342 goto captureProbe;
7343 }
7344
7345 // Get output channel information.
7346 unsigned int value;
7347 result = snd_pcm_hw_params_get_channels_max( params, &value );
7348 if ( result < 0 ) {
7349 snd_pcm_close( phandle );
7350 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7351 errorText_ = errorStream_.str();
7352 error( RtAudioError::WARNING );
7353 goto captureProbe;
7354 }
7355 info.outputChannels = value;
7356 snd_pcm_close( phandle );
7357
7358 captureProbe:
7359 stream = SND_PCM_STREAM_CAPTURE;
7360 snd_pcm_info_set_stream( pcminfo, stream );
7361
7362 // Now try for capture unless default device (with subdev = -1)
7363 if ( subdevice != -1 ) {
7364 result = snd_ctl_pcm_info( chandle, pcminfo );
7365 snd_ctl_close( chandle );
7366 if ( result < 0 ) {
7367 // Device probably doesn't support capture.
7368 if ( info.outputChannels == 0 ) return info;
7369 goto probeParameters;
7370 }
7371 }
7372 else
7373 snd_ctl_close( chandle );
7374
7375 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7376 if ( result < 0 ) {
7377 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7378 errorText_ = errorStream_.str();
7379 error( RtAudioError::WARNING );
7380 if ( info.outputChannels == 0 ) return info;
7381 goto probeParameters;
7382 }
7383
7384 // The device is open ... fill the parameter structure.
7385 result = snd_pcm_hw_params_any( phandle, params );
7386 if ( result < 0 ) {
7387 snd_pcm_close( phandle );
7388 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7389 errorText_ = errorStream_.str();
7390 error( RtAudioError::WARNING );
7391 if ( info.outputChannels == 0 ) return info;
7392 goto probeParameters;
7393 }
7394
7395 result = snd_pcm_hw_params_get_channels_max( params, &value );
7396 if ( result < 0 ) {
7397 snd_pcm_close( phandle );
7398 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7399 errorText_ = errorStream_.str();
7400 error( RtAudioError::WARNING );
7401 if ( info.outputChannels == 0 ) return info;
7402 goto probeParameters;
7403 }
7404 info.inputChannels = value;
7405 snd_pcm_close( phandle );
7406
7407 // If device opens for both playback and capture, we determine the channels.
7408 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7409 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7410
7411 // ALSA doesn't provide default devices so we'll use the first available one.
7412 if ( device == 0 && info.outputChannels > 0 )
7413 info.isDefaultOutput = true;
7414 if ( device == 0 && info.inputChannels > 0 )
7415 info.isDefaultInput = true;
7416
7417 probeParameters:
7418 // At this point, we just need to figure out the supported data
7419 // formats and sample rates. We'll proceed by opening the device in
7420 // the direction with the maximum number of channels, or playback if
7421 // they are equal. This might limit our sample rate options, but so
7422 // be it.
7423
7424 if ( info.outputChannels >= info.inputChannels )
7425 stream = SND_PCM_STREAM_PLAYBACK;
7426 else
7427 stream = SND_PCM_STREAM_CAPTURE;
7428 snd_pcm_info_set_stream( pcminfo, stream );
7429
7430 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7431 if ( result < 0 ) {
7432 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7433 errorText_ = errorStream_.str();
7434 error( RtAudioError::WARNING );
7435 return info;
7436 }
7437
7438 // The device is open ... fill the parameter structure.
7439 result = snd_pcm_hw_params_any( phandle, params );
7440 if ( result < 0 ) {
7441 snd_pcm_close( phandle );
7442 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7443 errorText_ = errorStream_.str();
7444 error( RtAudioError::WARNING );
7445 return info;
7446 }
7447
7448 // Test our discrete set of sample rate values.
7449 info.sampleRates.clear();
7450 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7451 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7452 info.sampleRates.push_back( SAMPLE_RATES[i] );
7453
7454 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7455 info.preferredSampleRate = SAMPLE_RATES[i];
7456 }
7457 }
7458 if ( info.sampleRates.size() == 0 ) {
7459 snd_pcm_close( phandle );
7460 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7461 errorText_ = errorStream_.str();
7462 error( RtAudioError::WARNING );
7463 return info;
7464 }
7465
7466 // Probe the supported data formats ... we don't care about endian-ness just yet
7467 snd_pcm_format_t format;
7468 info.nativeFormats = 0;
7469 format = SND_PCM_FORMAT_S8;
7470 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7471 info.nativeFormats |= RTAUDIO_SINT8;
7472 format = SND_PCM_FORMAT_S16;
7473 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7474 info.nativeFormats |= RTAUDIO_SINT16;
7475 format = SND_PCM_FORMAT_S24;
7476 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7477 info.nativeFormats |= RTAUDIO_SINT24;
7478 format = SND_PCM_FORMAT_S32;
7479 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7480 info.nativeFormats |= RTAUDIO_SINT32;
7481 format = SND_PCM_FORMAT_FLOAT;
7482 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7483 info.nativeFormats |= RTAUDIO_FLOAT32;
7484 format = SND_PCM_FORMAT_FLOAT64;
7485 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7486 info.nativeFormats |= RTAUDIO_FLOAT64;
7487
7488 // Check that we have at least one supported format
7489 if ( info.nativeFormats == 0 ) {
7490 snd_pcm_close( phandle );
7491 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7492 errorText_ = errorStream_.str();
7493 error( RtAudioError::WARNING );
7494 return info;
7495 }
7496
7497 // Get the device name
7498 char *cardname;
7499 result = snd_card_get_name( card, &cardname );
7500 if ( result >= 0 ) {
7501 sprintf( name, "hw:%s,%d", cardname, subdevice );
7502 free( cardname );
7503 }
7504 info.name = name;
7505
7506 // That's all ... close the device and return
7507 snd_pcm_close( phandle );
7508 info.probed = true;
7509 return info;
7510 }
7511
saveDeviceInfo(void)7512 void RtApiAlsa :: saveDeviceInfo( void )
7513 {
7514 devices_.clear();
7515
7516 unsigned int nDevices = getDeviceCount();
7517 devices_.resize( nDevices );
7518 for ( unsigned int i=0; i<nDevices; i++ )
7519 devices_[i] = getDeviceInfo( i );
7520 }
7521
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7522 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7523 unsigned int firstChannel, unsigned int sampleRate,
7524 RtAudioFormat format, unsigned int *bufferSize,
7525 RtAudio::StreamOptions *options )
7526
7527 {
7528 #if defined(__RTAUDIO_DEBUG__)
7529 snd_output_t *out;
7530 snd_output_stdio_attach(&out, stderr, 0);
7531 #endif
7532
7533 // I'm not using the "plug" interface ... too much inconsistent behavior.
7534
7535 unsigned nDevices = 0;
7536 int result, subdevice, card;
7537 char name[64];
7538 snd_ctl_t *chandle;
7539
7540 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7541 snprintf(name, sizeof(name), "%s", "default");
7542 else {
7543 // Count cards and devices
7544 card = -1;
7545 snd_card_next( &card );
7546 while ( card >= 0 ) {
7547 sprintf( name, "hw:%d", card );
7548 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7549 if ( result < 0 ) {
7550 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7551 errorText_ = errorStream_.str();
7552 return FAILURE;
7553 }
7554 subdevice = -1;
7555 while( 1 ) {
7556 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7557 if ( result < 0 ) break;
7558 if ( subdevice < 0 ) break;
7559 if ( nDevices == device ) {
7560 sprintf( name, "hw:%d,%d", card, subdevice );
7561 snd_ctl_close( chandle );
7562 goto foundDevice;
7563 }
7564 nDevices++;
7565 }
7566 snd_ctl_close( chandle );
7567 snd_card_next( &card );
7568 }
7569
7570 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7571 if ( result == 0 ) {
7572 if ( nDevices == device ) {
7573 strcpy( name, "default" );
7574 snd_ctl_close( chandle );
7575 goto foundDevice;
7576 }
7577 nDevices++;
7578 }
7579 snd_ctl_close( chandle );
7580
7581 if ( nDevices == 0 ) {
7582 // This should not happen because a check is made before this function is called.
7583 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7584 return FAILURE;
7585 }
7586
7587 if ( device >= nDevices ) {
7588 // This should not happen because a check is made before this function is called.
7589 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7590 return FAILURE;
7591 }
7592 }
7593
7594 foundDevice:
7595
7596 // The getDeviceInfo() function will not work for a device that is
7597 // already open. Thus, we'll probe the system before opening a
7598 // stream and save the results for use by getDeviceInfo().
7599 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7600 this->saveDeviceInfo();
7601
7602 snd_pcm_stream_t stream;
7603 if ( mode == OUTPUT )
7604 stream = SND_PCM_STREAM_PLAYBACK;
7605 else
7606 stream = SND_PCM_STREAM_CAPTURE;
7607
7608 snd_pcm_t *phandle;
7609 int openMode = SND_PCM_ASYNC;
7610 result = snd_pcm_open( &phandle, name, stream, openMode );
7611 if ( result < 0 ) {
7612 if ( mode == OUTPUT )
7613 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7614 else
7615 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7616 errorText_ = errorStream_.str();
7617 return FAILURE;
7618 }
7619
7620 // Fill the parameter structure.
7621 snd_pcm_hw_params_t *hw_params;
7622 snd_pcm_hw_params_alloca( &hw_params );
7623 result = snd_pcm_hw_params_any( phandle, hw_params );
7624 if ( result < 0 ) {
7625 snd_pcm_close( phandle );
7626 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7627 errorText_ = errorStream_.str();
7628 return FAILURE;
7629 }
7630
7631 #if defined(__RTAUDIO_DEBUG__)
7632 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7633 snd_pcm_hw_params_dump( hw_params, out );
7634 #endif
7635
7636 // Set access ... check user preference.
7637 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7638 stream_.userInterleaved = false;
7639 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7640 if ( result < 0 ) {
7641 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7642 stream_.deviceInterleaved[mode] = true;
7643 }
7644 else
7645 stream_.deviceInterleaved[mode] = false;
7646 }
7647 else {
7648 stream_.userInterleaved = true;
7649 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7650 if ( result < 0 ) {
7651 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7652 stream_.deviceInterleaved[mode] = false;
7653 }
7654 else
7655 stream_.deviceInterleaved[mode] = true;
7656 }
7657
7658 if ( result < 0 ) {
7659 snd_pcm_close( phandle );
7660 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7661 errorText_ = errorStream_.str();
7662 return FAILURE;
7663 }
7664
7665 // Determine how to set the device format.
7666 stream_.userFormat = format;
7667 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7668
7669 if ( format == RTAUDIO_SINT8 )
7670 deviceFormat = SND_PCM_FORMAT_S8;
7671 else if ( format == RTAUDIO_SINT16 )
7672 deviceFormat = SND_PCM_FORMAT_S16;
7673 else if ( format == RTAUDIO_SINT24 )
7674 deviceFormat = SND_PCM_FORMAT_S24;
7675 else if ( format == RTAUDIO_SINT32 )
7676 deviceFormat = SND_PCM_FORMAT_S32;
7677 else if ( format == RTAUDIO_FLOAT32 )
7678 deviceFormat = SND_PCM_FORMAT_FLOAT;
7679 else if ( format == RTAUDIO_FLOAT64 )
7680 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7681
7682 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7683 stream_.deviceFormat[mode] = format;
7684 goto setFormat;
7685 }
7686
7687 // The user requested format is not natively supported by the device.
7688 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7689 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7690 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7691 goto setFormat;
7692 }
7693
7694 deviceFormat = SND_PCM_FORMAT_FLOAT;
7695 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7696 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7697 goto setFormat;
7698 }
7699
7700 deviceFormat = SND_PCM_FORMAT_S32;
7701 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7702 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7703 goto setFormat;
7704 }
7705
7706 deviceFormat = SND_PCM_FORMAT_S24;
7707 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7708 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7709 goto setFormat;
7710 }
7711
7712 deviceFormat = SND_PCM_FORMAT_S16;
7713 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7715 goto setFormat;
7716 }
7717
7718 deviceFormat = SND_PCM_FORMAT_S8;
7719 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7720 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7721 goto setFormat;
7722 }
7723
7724 // If we get here, no supported format was found.
7725 snd_pcm_close( phandle );
7726 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7727 errorText_ = errorStream_.str();
7728 return FAILURE;
7729
7730 setFormat:
7731 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7732 if ( result < 0 ) {
7733 snd_pcm_close( phandle );
7734 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7735 errorText_ = errorStream_.str();
7736 return FAILURE;
7737 }
7738
7739 // Determine whether byte-swaping is necessary.
7740 stream_.doByteSwap[mode] = false;
7741 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7742 result = snd_pcm_format_cpu_endian( deviceFormat );
7743 if ( result == 0 )
7744 stream_.doByteSwap[mode] = true;
7745 else if (result < 0) {
7746 snd_pcm_close( phandle );
7747 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7748 errorText_ = errorStream_.str();
7749 return FAILURE;
7750 }
7751 }
7752
7753 // Set the sample rate.
7754 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7755 if ( result < 0 ) {
7756 snd_pcm_close( phandle );
7757 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7758 errorText_ = errorStream_.str();
7759 return FAILURE;
7760 }
7761
7762 // Determine the number of channels for this device. We support a possible
7763 // minimum device channel number > than the value requested by the user.
7764 stream_.nUserChannels[mode] = channels;
7765 unsigned int value;
7766 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7767 unsigned int deviceChannels = value;
7768 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7769 snd_pcm_close( phandle );
7770 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7771 errorText_ = errorStream_.str();
7772 return FAILURE;
7773 }
7774
7775 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7776 if ( result < 0 ) {
7777 snd_pcm_close( phandle );
7778 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7779 errorText_ = errorStream_.str();
7780 return FAILURE;
7781 }
7782 deviceChannels = value;
7783 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7784 stream_.nDeviceChannels[mode] = deviceChannels;
7785
7786 // Set the device channels.
7787 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7788 if ( result < 0 ) {
7789 snd_pcm_close( phandle );
7790 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7791 errorText_ = errorStream_.str();
7792 return FAILURE;
7793 }
7794
7795 // Set the buffer (or period) size.
7796 int dir = 0;
7797 snd_pcm_uframes_t periodSize = *bufferSize;
7798 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7799 if ( result < 0 ) {
7800 snd_pcm_close( phandle );
7801 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7802 errorText_ = errorStream_.str();
7803 return FAILURE;
7804 }
7805 *bufferSize = periodSize;
7806
7807 // Set the buffer number, which in ALSA is referred to as the "period".
7808 unsigned int periods = 0;
7809 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7810 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7811 if ( periods < 2 ) periods = 4; // a fairly safe default value
7812 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7813 if ( result < 0 ) {
7814 snd_pcm_close( phandle );
7815 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7816 errorText_ = errorStream_.str();
7817 return FAILURE;
7818 }
7819
7820 // If attempting to setup a duplex stream, the bufferSize parameter
7821 // MUST be the same in both directions!
7822 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7823 snd_pcm_close( phandle );
7824 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7825 errorText_ = errorStream_.str();
7826 return FAILURE;
7827 }
7828
7829 stream_.bufferSize = *bufferSize;
7830
7831 // Install the hardware configuration
7832 result = snd_pcm_hw_params( phandle, hw_params );
7833 if ( result < 0 ) {
7834 snd_pcm_close( phandle );
7835 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7836 errorText_ = errorStream_.str();
7837 return FAILURE;
7838 }
7839
7840 #if defined(__RTAUDIO_DEBUG__)
7841 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7842 snd_pcm_hw_params_dump( hw_params, out );
7843 #endif
7844
7845 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7846 snd_pcm_sw_params_t *sw_params = NULL;
7847 snd_pcm_sw_params_alloca( &sw_params );
7848 snd_pcm_sw_params_current( phandle, sw_params );
7849 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7850 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7851 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7852
7853 // The following two settings were suggested by Theo Veenker
7854 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7855 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7856
7857 // here are two options for a fix
7858 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7859 snd_pcm_uframes_t val;
7860 snd_pcm_sw_params_get_boundary( sw_params, &val );
7861 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7862
7863 result = snd_pcm_sw_params( phandle, sw_params );
7864 if ( result < 0 ) {
7865 snd_pcm_close( phandle );
7866 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7867 errorText_ = errorStream_.str();
7868 return FAILURE;
7869 }
7870
7871 #if defined(__RTAUDIO_DEBUG__)
7872 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7873 snd_pcm_sw_params_dump( sw_params, out );
7874 #endif
7875
7876 // Set flags for buffer conversion
7877 stream_.doConvertBuffer[mode] = false;
7878 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7879 stream_.doConvertBuffer[mode] = true;
7880 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7881 stream_.doConvertBuffer[mode] = true;
7882 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7883 stream_.nUserChannels[mode] > 1 )
7884 stream_.doConvertBuffer[mode] = true;
7885
7886 // Allocate the ApiHandle if necessary and then save.
7887 AlsaHandle *apiInfo = 0;
7888 if ( stream_.apiHandle == 0 ) {
7889 try {
7890 apiInfo = (AlsaHandle *) new AlsaHandle;
7891 }
7892 catch ( std::bad_alloc& ) {
7893 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7894 goto error;
7895 }
7896
7897 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7898 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7899 goto error;
7900 }
7901
7902 stream_.apiHandle = (void *) apiInfo;
7903 apiInfo->handles[0] = 0;
7904 apiInfo->handles[1] = 0;
7905 }
7906 else {
7907 apiInfo = (AlsaHandle *) stream_.apiHandle;
7908 }
7909 apiInfo->handles[mode] = phandle;
7910 phandle = 0;
7911
7912 // Allocate necessary internal buffers.
7913 unsigned long bufferBytes;
7914 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7915 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7916 if ( stream_.userBuffer[mode] == NULL ) {
7917 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7918 goto error;
7919 }
7920
7921 if ( stream_.doConvertBuffer[mode] ) {
7922
7923 bool makeBuffer = true;
7924 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7925 if ( mode == INPUT ) {
7926 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7927 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7928 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7929 }
7930 }
7931
7932 if ( makeBuffer ) {
7933 bufferBytes *= *bufferSize;
7934 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7935 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7936 if ( stream_.deviceBuffer == NULL ) {
7937 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7938 goto error;
7939 }
7940 }
7941 }
7942
7943 stream_.sampleRate = sampleRate;
7944 stream_.nBuffers = periods;
7945 stream_.device[mode] = device;
7946 stream_.state = STREAM_STOPPED;
7947
7948 // Setup the buffer conversion information structure.
7949 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7950
7951 // Setup thread if necessary.
7952 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7953 // We had already set up an output stream.
7954 stream_.mode = DUPLEX;
7955 // Link the streams if possible.
7956 apiInfo->synchronized = false;
7957 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7958 apiInfo->synchronized = true;
7959 else {
7960 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7961 error( RtAudioError::WARNING );
7962 }
7963 }
7964 else {
7965 stream_.mode = mode;
7966
7967 // Setup callback thread.
7968 stream_.callbackInfo.object = (void *) this;
7969
7970 // Set the thread attributes for joinable and realtime scheduling
7971 // priority (optional). The higher priority will only take affect
7972 // if the program is run as root or suid. Note, under Linux
7973 // processes with CAP_SYS_NICE privilege, a user can change
7974 // scheduling policy and priority (thus need not be root). See
7975 // POSIX "capabilities".
7976 pthread_attr_t attr;
7977 pthread_attr_init( &attr );
7978 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7979 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7980 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7981 stream_.callbackInfo.doRealtime = true;
7982 struct sched_param param;
7983 int priority = options->priority;
7984 int min = sched_get_priority_min( SCHED_RR );
7985 int max = sched_get_priority_max( SCHED_RR );
7986 if ( priority < min ) priority = min;
7987 else if ( priority > max ) priority = max;
7988 param.sched_priority = priority;
7989
7990 // Set the policy BEFORE the priority. Otherwise it fails.
7991 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7992 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7993 // This is definitely required. Otherwise it fails.
7994 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      pthread_attr_setschedparam(&attr, &param);
7996 }
7997 else
7998 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7999 #else
8000 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8001 #endif
8002
8003 stream_.callbackInfo.isRunning = true;
8004 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8005 pthread_attr_destroy( &attr );
8006 if ( result ) {
8007 // Failed. Try instead with default attributes.
8008 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8009 if ( result ) {
8010 stream_.callbackInfo.isRunning = false;
8011 errorText_ = "RtApiAlsa::error creating callback thread!";
8012 goto error;
8013 }
8014 }
8015 }
8016
8017 return SUCCESS;
8018
8019 error:
8020 if ( apiInfo ) {
8021 pthread_cond_destroy( &apiInfo->runnable_cv );
8022 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8023 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8024 delete apiInfo;
8025 stream_.apiHandle = 0;
8026 }
8027
8028 if ( phandle) snd_pcm_close( phandle );
8029
8030 for ( int i=0; i<2; i++ ) {
8031 if ( stream_.userBuffer[i] ) {
8032 free( stream_.userBuffer[i] );
8033 stream_.userBuffer[i] = 0;
8034 }
8035 }
8036
8037 if ( stream_.deviceBuffer ) {
8038 free( stream_.deviceBuffer );
8039 stream_.deviceBuffer = 0;
8040 }
8041
8042 stream_.state = STREAM_CLOSED;
8043 return FAILURE;
8044 }
8045
void RtApiAlsa :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // Ask the callback thread to exit: clear the running flag, then wake the
  // thread in case it is parked on the runnable condition variable (which it
  // is whenever the stream is stopped), and wait for it to terminate.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // If still running, stop the pcm(s) immediately (snd_pcm_drop discards
  // pending frames rather than draining them).
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );
  }

  // Release the ALSA handles and per-stream API data.
  // NOTE(review): apiInfo is dereferenced above before this null check —
  // presumably apiHandle is always non-null for an open stream; verify.
  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Free the internal user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the device-side conversion buffer, if one was allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
8095
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  // Prepare the output pcm if it is not already in the PREPARED state.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // Prepare the input pcm (skipped when input/output are hardware-linked,
  // i.e. apiInfo->synchronized is true).
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    // NOTE(review): if the device is already PREPARED, a negative return from
    // snd_pcm_drop above is left in 'result' and would trigger the
    // SYSTEM_ERROR path below — verify drop cannot fail here.
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Always wake the callback thread, even on error, so it can observe the
  // (possibly unchanged) stream state.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
8152
stopStream()8153 void RtApiAlsa :: stopStream()
8154 {
8155 verifyStream();
8156 if ( stream_.state == STREAM_STOPPED ) {
8157 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8158 error( RtAudioError::WARNING );
8159 return;
8160 }
8161
8162 stream_.state = STREAM_STOPPED;
8163 MUTEX_LOCK( &stream_.mutex );
8164
8165 int result = 0;
8166 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8167 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8168 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8169 if ( apiInfo->synchronized )
8170 result = snd_pcm_drop( handle[0] );
8171 else
8172 result = snd_pcm_drain( handle[0] );
8173 if ( result < 0 ) {
8174 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8175 errorText_ = errorStream_.str();
8176 goto unlock;
8177 }
8178 }
8179
8180 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8181 result = snd_pcm_drop( handle[1] );
8182 if ( result < 0 ) {
8183 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8184 errorText_ = errorStream_.str();
8185 goto unlock;
8186 }
8187 }
8188
8189 unlock:
8190 apiInfo->runnable = false; // fixes high CPU usage when stopped
8191 MUTEX_UNLOCK( &stream_.mutex );
8192
8193 if ( result >= 0 ) return;
8194 error( RtAudioError::SYSTEM_ERROR );
8195 }
8196
abortStream()8197 void RtApiAlsa :: abortStream()
8198 {
8199 verifyStream();
8200 if ( stream_.state == STREAM_STOPPED ) {
8201 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8202 error( RtAudioError::WARNING );
8203 return;
8204 }
8205
8206 stream_.state = STREAM_STOPPED;
8207 MUTEX_LOCK( &stream_.mutex );
8208
8209 int result = 0;
8210 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8211 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8213 result = snd_pcm_drop( handle[0] );
8214 if ( result < 0 ) {
8215 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8216 errorText_ = errorStream_.str();
8217 goto unlock;
8218 }
8219 }
8220
8221 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8222 result = snd_pcm_drop( handle[1] );
8223 if ( result < 0 ) {
8224 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8225 errorText_ = errorStream_.str();
8226 goto unlock;
8227 }
8228 }
8229
8230 unlock:
8231 apiInfo->runnable = false; // fixes high CPU usage when stopped
8232 MUTEX_UNLOCK( &stream_.mutex );
8233
8234 if ( result >= 0 ) return;
8235 error( RtAudioError::SYSTEM_ERROR );
8236 }
8237
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // If the stream is stopped, park this thread on the runnable condition
  // variable until startStream() or closeStream() signals it.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report any xruns seen since the last callback, then invoke the user
  // callback with the current user buffers.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // A callback return of 2 requests an immediate abort (pending output dropped).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when a format/channel
    // conversion is pending, otherwise directly into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // NOTE(review): variable-length array — a gcc/clang extension, not
      // standard C++.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the pcm.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      // Input failed, but still attempt the output half of a duplex stream.
      error( RtAudioError::WARNING );
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // NOTE(review): variable-length array — a gcc/clang extension, not
      // standard C++.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next callback and re-prepare the pcm.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream clock; honor a callback return of 1 (graceful stop).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
8425
alsaCallbackHandler(void * ptr)8426 static void *alsaCallbackHandler( void *ptr )
8427 {
8428 CallbackInfo *info = (CallbackInfo *) ptr;
8429 RtApiAlsa *object = (RtApiAlsa *) info->object;
8430 bool *isRunning = &info->isRunning;
8431
8432 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8433 if ( info->doRealtime ) {
8434 std::cerr << "RtAudio alsa: " <<
8435 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8436 "running realtime scheduling" << std::endl;
8437 }
8438 #endif
8439
8440 while ( *isRunning == true ) {
8441 pthread_testcancel();
8442 object->callbackEvent();
8443 }
8444
8445 pthread_exit( NULL );
8446 }
8447
8448 //******************** End of __LINUX_ALSA__ *********************//
8449 #endif
8450
8451 #if defined(__LINUX_PULSE__)
8452
8453 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8454 // and Tristan Matthews.
8455
8456 #include <pulse/error.h>
8457 #include <pulse/simple.h>
8458 #include <cstdio>
8459
// Zero-terminated list of sample rates advertised for the PulseAudio device.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Pairing of an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// RtAudio <-> PulseAudio format pairs supported by this backend;
// terminated by a {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8473
// Per-stream state for the PulseAudio backend: the playback/record
// connections, the callback thread, and the condition variable used to park
// that thread while the stream is stopped.
struct PulseAudioHandle {
  pa_simple *s_play;           // playback connection (may be NULL; see closeStream)
  pa_simple *s_rec;            // record connection (may be NULL; see closeStream)
  pthread_t thread;            // callback thread; not set by this constructor
  pthread_cond_t runnable_cv;  // NOTE(review): not initialized here — presumably
                               // initialized where the stream is opened; verify
  bool runnable;               // true when the callback thread may process audio
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8482
~RtApiPulse()8483 RtApiPulse::~RtApiPulse()
8484 {
8485 if ( stream_.state != STREAM_CLOSED )
8486 closeStream();
8487 }
8488
getDeviceCount(void)8489 unsigned int RtApiPulse::getDeviceCount( void )
8490 {
8491 return 1;
8492 }
8493
getDeviceInfo(unsigned int)8494 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8495 {
8496 RtAudio::DeviceInfo info;
8497 info.probed = true;
8498 info.name = "PulseAudio";
8499 info.outputChannels = 2;
8500 info.inputChannels = 2;
8501 info.duplexChannels = 2;
8502 info.isDefaultOutput = true;
8503 info.isDefaultInput = true;
8504
8505 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8506 info.sampleRates.push_back( *sr );
8507
8508 info.preferredSampleRate = 48000;
8509 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8510
8511 return info;
8512 }
8513
pulseaudio_callback(void * user)8514 static void *pulseaudio_callback( void * user )
8515 {
8516 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8517 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8518 volatile bool *isRunning = &cbi->isRunning;
8519
8520 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8521 if (cbi->doRealtime) {
8522 std::cerr << "RtAudio pulse: " <<
8523 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8524 "running realtime scheduling" << std::endl;
8525 }
8526 #endif
8527
8528 while ( *isRunning ) {
8529 pthread_testcancel();
8530 context->callbackEvent();
8531 }
8532
8533 pthread_exit( NULL );
8534 }
8535
void RtApiPulse::closeStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // Ask the callback thread to exit; wake it first if it is parked on the
  // runnable condition (which it is whenever the stream is stopped), then
  // wait for it to terminate before freeing the Pulse connections.
  stream_.callbackInfo.isRunning = false;
  if ( pah ) {
    MUTEX_LOCK( &stream_.mutex );
    if ( stream_.state == STREAM_STOPPED ) {
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    // Flush pending playback data before releasing the connection.
    if ( pah->s_play ) {
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  // Free the user-side buffers for both directions.
  // NOTE(review): stream_.deviceBuffer is not freed here (the ALSA
  // closeStream does free it) — verify it is released elsewhere.
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
8574
// One iteration of the PulseAudio callback loop: waits while stopped,
// invokes the user callback, then performs the blocking simple-API
// write and/or read for the current stream mode.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // If the stream is stopped, park on the condition variable until
  // startStream() (or closeStream(), for shutdown) flags us runnable.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    // Woken for shutdown rather than a restart: bail out.
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke the user callback with the user-format buffers.
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Buffers actually exchanged with PulseAudio: the device-format
  // conversion buffer when conversion is enabled, else the user buffers.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // State may have changed while the callback ran (e.g. stopStream()).
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      // Convert user-format output into the device format/layout.
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write of one buffer of playback data.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer of capture data.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    // Convert captured device-format data into the user's format/layout.
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // Callback return value 1 requests a normal (draining) stop.
  if ( doStopStream == 1 )
    stopStream();
}
8666
startStream(void)8667 void RtApiPulse::startStream( void )
8668 {
8669 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8670
8671 if ( stream_.state == STREAM_CLOSED ) {
8672 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8673 error( RtAudioError::INVALID_USE );
8674 return;
8675 }
8676 if ( stream_.state == STREAM_RUNNING ) {
8677 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8678 error( RtAudioError::WARNING );
8679 return;
8680 }
8681
8682 MUTEX_LOCK( &stream_.mutex );
8683
8684 #if defined( HAVE_GETTIMEOFDAY )
8685 gettimeofday( &stream_.lastTickTimestamp, NULL );
8686 #endif
8687
8688 stream_.state = STREAM_RUNNING;
8689
8690 pah->runnable = true;
8691 pthread_cond_signal( &pah->runnable_cv );
8692 MUTEX_UNLOCK( &stream_.mutex );
8693 }
8694
// Stop the stream after draining any queued playback data.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the mutex so the callback thread's next
  // state check keeps it from issuing further reads/writes.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Drain: let already-queued playback data finish before stopping.
      if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  // NOTE(review): redundant with the assignment above — the state is
  // already STREAM_STOPPED and nothing here changes it.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8731
// Stop the stream immediately, discarding (flushing) any queued playback
// data rather than draining it as stopStream() does.
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the mutex so the callback thread's next
  // state check keeps it from issuing further reads/writes.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Flush: discard pending playback data for an immediate stop.
      if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  // NOTE(review): redundant with the assignment above — the state is
  // already STREAM_STOPPED and nothing here changes it.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8768
// Open one direction (INPUT or OUTPUT) of the default PulseAudio device
// via the simple API, allocating buffers and, on first open, the handle
// and callback thread.  Returns SUCCESS/FAILURE.  Only device 0,
// firstChannel 0, and 1-2 channels are supported by this backend.
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
{
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  // This backend exposes a single (default) device.
  if ( device != 0 ) return false;
  if ( mode != INPUT && mode != OUTPUT ) return false;
  if ( channels != 1 && channels != 2 ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
    return false;
  }
  ss.channels = channels;

  if ( firstChannel != 0 ) return false;

  // Validate the sample rate against the backend's supported-rate table.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      sr_found = true;
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
      break;
    }
  }
  if ( !sr_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
    return false;
  }

  // Map the requested RtAudio format to a native PulseAudio format.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      sf_found = true;
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
      break;
    }
  }
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  }

  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {

    // Reuse an existing (output) device buffer for duplex input if it
    // is already large enough.
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // First open: create the API handle and its condition variable.
  if ( !stream_.apiHandle ) {
    // NOTE(review): this inner declaration shadows the outer `pah`; the
    // outer one is re-read from stream_.apiHandle below, so behavior is
    // unaffected.  Also, `new` throws rather than returning NULL, so the
    // !pah check is effectively dead.
    PulseAudioHandle *pah = new PulseAudioHandle;
    if ( !pah ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
      goto error;
    }

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
      goto error;
    }
  }
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // Connect to the PulseAudio server for the requested direction.
  int error;
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
  switch ( mode ) {
  case INPUT:
    pa_buffer_attr buffer_attr;
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;

    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
      goto error;
    }
    break;
  case OUTPUT:
    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
      goto error;
    }
    break;
  default:
    goto error;
  }

  // Opening the same direction twice is an error; opening the other
  // direction upgrades the stream to DUPLEX.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    goto error;
  else
    stream_.mode = DUPLEX;

  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;

    stream_.state = STREAM_STOPPED;
    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional). The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      // Clamp the requested priority into the SCHED_RR range.
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;

      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      pthread_attr_setschedparam(&attr, &param);
    }
    else
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#else
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#endif

    stream_.callbackInfo.isRunning = true;
    int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
    pthread_attr_destroy(&attr);
    if(result != 0) {
      // Failed. Try instead with default attributes.
      result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
      if(result != 0) {
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
        goto error;
      }
    }
  }

  return SUCCESS;

 error:
  // NOTE(review): the handle is only deleted when isRunning is true, so
  // early failures (before thread creation) appear to leave the handle
  // allocated in stream_.apiHandle — presumably reclaimed by a later
  // closeStream(); verify against callers.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
8997
8998 //******************** End of __LINUX_PULSE__ *********************//
8999 #endif
9000
9001 #if defined(__LINUX_OSS__)
9002
9003 #include <unistd.h>
9004 #include <sys/ioctl.h>
9005 #include <unistd.h>
9006 #include <fcntl.h>
9007 #include <sys/soundcard.h>
9008 #include <errno.h>
9009 #include <math.h>
9010
9011 static void *ossCallbackHandler(void * ptr);
9012
9013 // A structure to hold various information related to the OSS API
9014 // implementation.
// Per-stream bookkeeping for the OSS backend.
struct OssHandle {
  int id[2];    // device ids (playback, capture)
  bool xrun[2]; // over/underrun flags per direction
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    : triggered( false )
  {
    // Clear both directions' device ids and xrun flags.
    for ( int i=0; i<2; i++ ) {
      id[i] = 0;
      xrun[i] = false;
    }
  }
};
9024
// Default constructor: no OSS-specific initialization is required here.
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
9029
~RtApiOss()9030 RtApiOss :: ~RtApiOss()
9031 {
9032 if ( stream_.state != STREAM_CLOSED ) closeStream();
9033 }
9034
getDeviceCount(void)9035 unsigned int RtApiOss :: getDeviceCount( void )
9036 {
9037 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9038 if ( mixerfd == -1 ) {
9039 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9040 error( RtAudioError::WARNING );
9041 return 0;
9042 }
9043
9044 oss_sysinfo sysinfo;
9045 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9046 close( mixerfd );
9047 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9048 error( RtAudioError::WARNING );
9049 return 0;
9050 }
9051
9052 close( mixerfd );
9053 return sysinfo.numaudios;
9054 }
9055
getDeviceInfo(unsigned int device)9056 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9057 {
9058 RtAudio::DeviceInfo info;
9059 info.probed = false;
9060
9061 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9062 if ( mixerfd == -1 ) {
9063 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9064 error( RtAudioError::WARNING );
9065 return info;
9066 }
9067
9068 oss_sysinfo sysinfo;
9069 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9070 if ( result == -1 ) {
9071 close( mixerfd );
9072 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9073 error( RtAudioError::WARNING );
9074 return info;
9075 }
9076
9077 unsigned nDevices = sysinfo.numaudios;
9078 if ( nDevices == 0 ) {
9079 close( mixerfd );
9080 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9081 error( RtAudioError::INVALID_USE );
9082 return info;
9083 }
9084
9085 if ( device >= nDevices ) {
9086 close( mixerfd );
9087 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9088 error( RtAudioError::INVALID_USE );
9089 return info;
9090 }
9091
9092 oss_audioinfo ainfo;
9093 ainfo.dev = device;
9094 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9095 close( mixerfd );
9096 if ( result == -1 ) {
9097 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9098 errorText_ = errorStream_.str();
9099 error( RtAudioError::WARNING );
9100 return info;
9101 }
9102
9103 // Probe channels
9104 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9105 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9106 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9107 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9108 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9109 }
9110
9111 // Probe data formats ... do for input
9112 unsigned long mask = ainfo.iformats;
9113 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9114 info.nativeFormats |= RTAUDIO_SINT16;
9115 if ( mask & AFMT_S8 )
9116 info.nativeFormats |= RTAUDIO_SINT8;
9117 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9118 info.nativeFormats |= RTAUDIO_SINT32;
9119 #ifdef AFMT_FLOAT
9120 if ( mask & AFMT_FLOAT )
9121 info.nativeFormats |= RTAUDIO_FLOAT32;
9122 #endif
9123 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9124 info.nativeFormats |= RTAUDIO_SINT24;
9125
9126 // Check that we have at least one supported format
9127 if ( info.nativeFormats == 0 ) {
9128 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9129 errorText_ = errorStream_.str();
9130 error( RtAudioError::WARNING );
9131 return info;
9132 }
9133
9134 // Probe the supported sample rates.
9135 info.sampleRates.clear();
9136 if ( ainfo.nrates ) {
9137 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9138 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9139 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9140 info.sampleRates.push_back( SAMPLE_RATES[k] );
9141
9142 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9143 info.preferredSampleRate = SAMPLE_RATES[k];
9144
9145 break;
9146 }
9147 }
9148 }
9149 }
9150 else {
9151 // Check min and max rate values;
9152 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9153 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9154 info.sampleRates.push_back( SAMPLE_RATES[k] );
9155
9156 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9157 info.preferredSampleRate = SAMPLE_RATES[k];
9158 }
9159 }
9160 }
9161
9162 if ( info.sampleRates.size() == 0 ) {
9163 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9164 errorText_ = errorStream_.str();
9165 error( RtAudioError::WARNING );
9166 }
9167 else {
9168 info.probed = true;
9169 info.name = ainfo.name;
9170 }
9171
9172 return info;
9173 }
9174
9175
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)9176 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9177 unsigned int firstChannel, unsigned int sampleRate,
9178 RtAudioFormat format, unsigned int *bufferSize,
9179 RtAudio::StreamOptions *options )
9180 {
9181 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9182 if ( mixerfd == -1 ) {
9183 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9184 return FAILURE;
9185 }
9186
9187 oss_sysinfo sysinfo;
9188 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9189 if ( result == -1 ) {
9190 close( mixerfd );
9191 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9192 return FAILURE;
9193 }
9194
9195 unsigned nDevices = sysinfo.numaudios;
9196 if ( nDevices == 0 ) {
9197 // This should not happen because a check is made before this function is called.
9198 close( mixerfd );
9199 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9200 return FAILURE;
9201 }
9202
9203 if ( device >= nDevices ) {
9204 // This should not happen because a check is made before this function is called.
9205 close( mixerfd );
9206 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9207 return FAILURE;
9208 }
9209
9210 oss_audioinfo ainfo;
9211 ainfo.dev = device;
9212 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9213 close( mixerfd );
9214 if ( result == -1 ) {
9215 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9216 errorText_ = errorStream_.str();
9217 return FAILURE;
9218 }
9219
9220 // Check if device supports input or output
9221 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9222 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9223 if ( mode == OUTPUT )
9224 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9225 else
9226 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9227 errorText_ = errorStream_.str();
9228 return FAILURE;
9229 }
9230
9231 int flags = 0;
9232 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9233 if ( mode == OUTPUT )
9234 flags |= O_WRONLY;
9235 else { // mode == INPUT
9236 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9237 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9238 close( handle->id[0] );
9239 handle->id[0] = 0;
9240 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9241 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9242 errorText_ = errorStream_.str();
9243 return FAILURE;
9244 }
9245 // Check that the number previously set channels is the same.
9246 if ( stream_.nUserChannels[0] != channels ) {
9247 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9248 errorText_ = errorStream_.str();
9249 return FAILURE;
9250 }
9251 flags |= O_RDWR;
9252 }
9253 else
9254 flags |= O_RDONLY;
9255 }
9256
9257 // Set exclusive access if specified.
9258 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9259
9260 // Try to open the device.
9261 int fd;
9262 fd = open( ainfo.devnode, flags, 0 );
9263 if ( fd == -1 ) {
9264 if ( errno == EBUSY )
9265 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9266 else
9267 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9268 errorText_ = errorStream_.str();
9269 return FAILURE;
9270 }
9271
9272 // For duplex operation, specifically set this mode (this doesn't seem to work).
9273 /*
9274 if ( flags | O_RDWR ) {
9275 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9276 if ( result == -1) {
9277 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9278 errorText_ = errorStream_.str();
9279 return FAILURE;
9280 }
9281 }
9282 */
9283
9284 // Check the device channel support.
9285 stream_.nUserChannels[mode] = channels;
9286 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9287 close( fd );
9288 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9289 errorText_ = errorStream_.str();
9290 return FAILURE;
9291 }
9292
9293 // Set the number of channels.
9294 int deviceChannels = channels + firstChannel;
9295 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9296 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9297 close( fd );
9298 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9299 errorText_ = errorStream_.str();
9300 return FAILURE;
9301 }
9302 stream_.nDeviceChannels[mode] = deviceChannels;
9303
9304 // Get the data format mask
9305 int mask;
9306 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9307 if ( result == -1 ) {
9308 close( fd );
9309 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9310 errorText_ = errorStream_.str();
9311 return FAILURE;
9312 }
9313
9314 // Determine how to set the device format.
9315 stream_.userFormat = format;
9316 int deviceFormat = -1;
9317 stream_.doByteSwap[mode] = false;
9318 if ( format == RTAUDIO_SINT8 ) {
9319 if ( mask & AFMT_S8 ) {
9320 deviceFormat = AFMT_S8;
9321 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9322 }
9323 }
9324 else if ( format == RTAUDIO_SINT16 ) {
9325 if ( mask & AFMT_S16_NE ) {
9326 deviceFormat = AFMT_S16_NE;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9328 }
9329 else if ( mask & AFMT_S16_OE ) {
9330 deviceFormat = AFMT_S16_OE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9332 stream_.doByteSwap[mode] = true;
9333 }
9334 }
9335 else if ( format == RTAUDIO_SINT24 ) {
9336 if ( mask & AFMT_S24_NE ) {
9337 deviceFormat = AFMT_S24_NE;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9339 }
9340 else if ( mask & AFMT_S24_OE ) {
9341 deviceFormat = AFMT_S24_OE;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9343 stream_.doByteSwap[mode] = true;
9344 }
9345 }
9346 else if ( format == RTAUDIO_SINT32 ) {
9347 if ( mask & AFMT_S32_NE ) {
9348 deviceFormat = AFMT_S32_NE;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9350 }
9351 else if ( mask & AFMT_S32_OE ) {
9352 deviceFormat = AFMT_S32_OE;
9353 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9354 stream_.doByteSwap[mode] = true;
9355 }
9356 }
9357
9358 if ( deviceFormat == -1 ) {
9359 // The user requested format is not natively supported by the device.
9360 if ( mask & AFMT_S16_NE ) {
9361 deviceFormat = AFMT_S16_NE;
9362 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9363 }
9364 else if ( mask & AFMT_S32_NE ) {
9365 deviceFormat = AFMT_S32_NE;
9366 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9367 }
9368 else if ( mask & AFMT_S24_NE ) {
9369 deviceFormat = AFMT_S24_NE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9371 }
9372 else if ( mask & AFMT_S16_OE ) {
9373 deviceFormat = AFMT_S16_OE;
9374 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9375 stream_.doByteSwap[mode] = true;
9376 }
9377 else if ( mask & AFMT_S32_OE ) {
9378 deviceFormat = AFMT_S32_OE;
9379 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9380 stream_.doByteSwap[mode] = true;
9381 }
9382 else if ( mask & AFMT_S24_OE ) {
9383 deviceFormat = AFMT_S24_OE;
9384 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9385 stream_.doByteSwap[mode] = true;
9386 }
9387 else if ( mask & AFMT_S8) {
9388 deviceFormat = AFMT_S8;
9389 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9390 }
9391 }
9392
9393 if ( stream_.deviceFormat[mode] == 0 ) {
9394 // This really shouldn't happen ...
9395 close( fd );
9396 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9397 errorText_ = errorStream_.str();
9398 return FAILURE;
9399 }
9400
9401 // Set the data format.
9402 int temp = deviceFormat;
9403 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9404 if ( result == -1 || deviceFormat != temp ) {
9405 close( fd );
9406 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9407 errorText_ = errorStream_.str();
9408 return FAILURE;
9409 }
9410
9411 // Attempt to set the buffer size. According to OSS, the minimum
9412 // number of buffers is two. The supposed minimum buffer size is 16
9413 // bytes, so that will be our lower bound. The argument to this
9414 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9415 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9416 // We'll check the actual value used near the end of the setup
9417 // procedure.
9418 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9419 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9420 int buffers = 0;
9421 if ( options ) buffers = options->numberOfBuffers;
9422 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9423 if ( buffers < 2 ) buffers = 3;
9424 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9425 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9426 if ( result == -1 ) {
9427 close( fd );
9428 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9429 errorText_ = errorStream_.str();
9430 return FAILURE;
9431 }
9432 stream_.nBuffers = buffers;
9433
9434 // Save buffer size (in sample frames).
9435 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9436 stream_.bufferSize = *bufferSize;
9437
9438 // Set the sample rate.
9439 int srate = sampleRate;
9440 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9441 if ( result == -1 ) {
9442 close( fd );
9443 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9444 errorText_ = errorStream_.str();
9445 return FAILURE;
9446 }
9447
9448 // Verify the sample rate setup worked.
9449 if ( abs( srate - (int)sampleRate ) > 100 ) {
9450 close( fd );
9451 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9452 errorText_ = errorStream_.str();
9453 return FAILURE;
9454 }
9455 stream_.sampleRate = sampleRate;
9456
9457 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9458 // We're doing duplex setup here.
9459 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9460 stream_.nDeviceChannels[0] = deviceChannels;
9461 }
9462
9463 // Set interleaving parameters.
9464 stream_.userInterleaved = true;
9465 stream_.deviceInterleaved[mode] = true;
9466 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9467 stream_.userInterleaved = false;
9468
9469 // Set flags for buffer conversion
9470 stream_.doConvertBuffer[mode] = false;
9471 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9472 stream_.doConvertBuffer[mode] = true;
9473 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9474 stream_.doConvertBuffer[mode] = true;
9475 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9476 stream_.nUserChannels[mode] > 1 )
9477 stream_.doConvertBuffer[mode] = true;
9478
9479 // Allocate the stream handles if necessary and then save.
9480 if ( stream_.apiHandle == 0 ) {
9481 try {
9482 handle = new OssHandle;
9483 }
9484 catch ( std::bad_alloc& ) {
9485 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9486 goto error;
9487 }
9488
9489 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9490 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9491 goto error;
9492 }
9493
9494 stream_.apiHandle = (void *) handle;
9495 }
9496 else {
9497 handle = (OssHandle *) stream_.apiHandle;
9498 }
9499 handle->id[mode] = fd;
9500
9501 // Allocate necessary internal buffers.
9502 unsigned long bufferBytes;
9503 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9504 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9505 if ( stream_.userBuffer[mode] == NULL ) {
9506 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9507 goto error;
9508 }
9509
9510 if ( stream_.doConvertBuffer[mode] ) {
9511
9512 bool makeBuffer = true;
9513 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9514 if ( mode == INPUT ) {
9515 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9516 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9517 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9518 }
9519 }
9520
9521 if ( makeBuffer ) {
9522 bufferBytes *= *bufferSize;
9523 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9524 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9525 if ( stream_.deviceBuffer == NULL ) {
9526 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9527 goto error;
9528 }
9529 }
9530 }
9531
9532 stream_.device[mode] = device;
9533 stream_.state = STREAM_STOPPED;
9534
9535 // Setup the buffer conversion information structure.
9536 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9537
9538 // Setup thread if necessary.
9539 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9540 // We had already set up an output stream.
9541 stream_.mode = DUPLEX;
9542 if ( stream_.device[0] == device ) handle->id[0] = fd;
9543 }
9544 else {
9545 stream_.mode = mode;
9546
9547 // Setup callback thread.
9548 stream_.callbackInfo.object = (void *) this;
9549
9550 // Set the thread attributes for joinable and realtime scheduling
9551 // priority. The higher priority will only take affect if the
9552 // program is run as root or suid.
9553 pthread_attr_t attr;
9554 pthread_attr_init( &attr );
9555 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9556 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9557 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9558 stream_.callbackInfo.doRealtime = true;
9559 struct sched_param param;
9560 int priority = options->priority;
9561 int min = sched_get_priority_min( SCHED_RR );
9562 int max = sched_get_priority_max( SCHED_RR );
9563 if ( priority < min ) priority = min;
9564 else if ( priority > max ) priority = max;
9565 param.sched_priority = priority;
9566
9567 // Set the policy BEFORE the priority. Otherwise it fails.
9568 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9569 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9570 // This is definitely required. Otherwise it fails.
9571 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9572 pthread_attr_setschedparam(&attr, ¶m);
9573 }
9574 else
9575 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9576 #else
9577 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9578 #endif
9579
9580 stream_.callbackInfo.isRunning = true;
9581 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9582 pthread_attr_destroy( &attr );
9583 if ( result ) {
9584 // Failed. Try instead with default attributes.
9585 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9586 if ( result ) {
9587 stream_.callbackInfo.isRunning = false;
9588 errorText_ = "RtApiOss::error creating callback thread!";
9589 goto error;
9590 }
9591 }
9592 }
9593
9594 return SUCCESS;
9595
9596 error:
9597 if ( handle ) {
9598 pthread_cond_destroy( &handle->runnable );
9599 if ( handle->id[0] ) close( handle->id[0] );
9600 if ( handle->id[1] ) close( handle->id[1] );
9601 delete handle;
9602 stream_.apiHandle = 0;
9603 }
9604
9605 for ( int i=0; i<2; i++ ) {
9606 if ( stream_.userBuffer[i] ) {
9607 free( stream_.userBuffer[i] );
9608 stream_.userBuffer[i] = 0;
9609 }
9610 }
9611
9612 if ( stream_.deviceBuffer ) {
9613 free( stream_.deviceBuffer );
9614 stream_.deviceBuffer = 0;
9615 }
9616
9617 stream_.state = STREAM_CLOSED;
9618 return FAILURE;
9619 }
9620
void RtApiOss :: closeStream()
{
  // Closing an already-closed stream is only a warning, not an error.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Ask the callback thread to exit its loop, wake it in case it is
  // parked on the "runnable" condition variable (it waits there while
  // the stream is stopped), then reap the thread before teardown.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // If still running, halt the device: id[0] is the output (or shared
  // duplex) descriptor, id[1] the input descriptor.
  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  // Destroy the per-API handle: condition variable plus any open
  // device file descriptors.
  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free the user-side buffers (one per direction) ...
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // ... and the shared device-side conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9668
startStream()9669 void RtApiOss :: startStream()
9670 {
9671 verifyStream();
9672 if ( stream_.state == STREAM_RUNNING ) {
9673 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9674 error( RtAudioError::WARNING );
9675 return;
9676 }
9677
9678 MUTEX_LOCK( &stream_.mutex );
9679
9680 #if defined( HAVE_GETTIMEOFDAY )
9681 gettimeofday( &stream_.lastTickTimestamp, NULL );
9682 #endif
9683
9684 stream_.state = STREAM_RUNNING;
9685
9686 // No need to do anything else here ... OSS automatically starts
9687 // when fed samples.
9688
9689 MUTEX_UNLOCK( &stream_.mutex );
9690
9691 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9692 pthread_cond_signal( &handle->runnable );
9693 }
9694
stopStream()9695 void RtApiOss :: stopStream()
9696 {
9697 verifyStream();
9698 if ( stream_.state == STREAM_STOPPED ) {
9699 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9700 error( RtAudioError::WARNING );
9701 return;
9702 }
9703
9704 MUTEX_LOCK( &stream_.mutex );
9705
9706 // The state might change while waiting on a mutex.
9707 if ( stream_.state == STREAM_STOPPED ) {
9708 MUTEX_UNLOCK( &stream_.mutex );
9709 return;
9710 }
9711
9712 int result = 0;
9713 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9714 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9715
9716 // Flush the output with zeros a few times.
9717 char *buffer;
9718 int samples;
9719 RtAudioFormat format;
9720
9721 if ( stream_.doConvertBuffer[0] ) {
9722 buffer = stream_.deviceBuffer;
9723 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9724 format = stream_.deviceFormat[0];
9725 }
9726 else {
9727 buffer = stream_.userBuffer[0];
9728 samples = stream_.bufferSize * stream_.nUserChannels[0];
9729 format = stream_.userFormat;
9730 }
9731
9732 memset( buffer, 0, samples * formatBytes(format) );
9733 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9734 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9735 if ( result == -1 ) {
9736 errorText_ = "RtApiOss::stopStream: audio write error.";
9737 error( RtAudioError::WARNING );
9738 }
9739 }
9740
9741 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9742 if ( result == -1 ) {
9743 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9744 errorText_ = errorStream_.str();
9745 goto unlock;
9746 }
9747 handle->triggered = false;
9748 }
9749
9750 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9751 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9752 if ( result == -1 ) {
9753 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9754 errorText_ = errorStream_.str();
9755 goto unlock;
9756 }
9757 }
9758
9759 unlock:
9760 stream_.state = STREAM_STOPPED;
9761 MUTEX_UNLOCK( &stream_.mutex );
9762
9763 if ( result != -1 ) return;
9764 error( RtAudioError::SYSTEM_ERROR );
9765 }
9766
abortStream()9767 void RtApiOss :: abortStream()
9768 {
9769 verifyStream();
9770 if ( stream_.state == STREAM_STOPPED ) {
9771 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9772 error( RtAudioError::WARNING );
9773 return;
9774 }
9775
9776 MUTEX_LOCK( &stream_.mutex );
9777
9778 // The state might change while waiting on a mutex.
9779 if ( stream_.state == STREAM_STOPPED ) {
9780 MUTEX_UNLOCK( &stream_.mutex );
9781 return;
9782 }
9783
9784 int result = 0;
9785 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9786 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9787 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9788 if ( result == -1 ) {
9789 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9790 errorText_ = errorStream_.str();
9791 goto unlock;
9792 }
9793 handle->triggered = false;
9794 }
9795
9796 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9797 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9798 if ( result == -1 ) {
9799 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9800 errorText_ = errorStream_.str();
9801 goto unlock;
9802 }
9803 }
9804
9805 unlock:
9806 stream_.state = STREAM_STOPPED;
9807 MUTEX_UNLOCK( &stream_.mutex );
9808
9809 if ( result != -1 ) return;
9810 error( RtAudioError::SYSTEM_ERROR );
9811 }
9812
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // While stopped, park on the condition variable until startStream()
  // or closeStream() signals; bail out if we were not restarted.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any xrun flags recorded by earlier I/O errors.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return of 2 requests an immediate abort (no output drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    // First pass of a duplex stream: prime the output while triggering
    // is disabled, then enable input+output simultaneously so both
    // directions start in sync.
    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream-time counter; a callback return of 1 requests a
  // graceful stop (with output drain).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
9944
ossCallbackHandler(void * ptr)9945 static void *ossCallbackHandler( void *ptr )
9946 {
9947 CallbackInfo *info = (CallbackInfo *) ptr;
9948 RtApiOss *object = (RtApiOss *) info->object;
9949 bool *isRunning = &info->isRunning;
9950
9951 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9952 if (info->doRealtime) {
9953 std::cerr << "RtAudio oss: " <<
9954 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9955 "running realtime scheduling" << std::endl;
9956 }
9957 #endif
9958
9959 while ( *isRunning == true ) {
9960 pthread_testcancel();
9961 object->callbackEvent();
9962 }
9963
9964 pthread_exit( NULL );
9965 }
9966
9967 //******************** End of __LINUX_OSS__ *********************//
9968 #endif
9969
9970
9971 // *************************************************** //
9972 //
9973 // Protected common (OS-independent) RtAudio methods.
9974 //
9975 // *************************************************** //
9976
9977 // This method can be modified to control the behavior of error
9978 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatcher: routes errorText_ to the client's error
  // callback if one is set, otherwise prints warnings or throws.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.
    // firstErrorOccurred_ is the re-entrancy guard for that case.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy errorText_ now: the abortStream() call below may overwrite it.
    const std::string errorMessage = errorText_;

    // For real errors on an active stream, shut the stream down before
    // reporting to the callback.
    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  // No callback installed: warnings go to stderr (if enabled), real
  // errors become exceptions.
  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
10008
verifyStream()10009 void RtApi :: verifyStream()
10010 {
10011 if ( stream_.state == STREAM_CLOSED ) {
10012 errorText_ = "RtApi:: a stream is not open!";
10013 error( RtAudioError::INVALID_USE );
10014 }
10015 }
10016
clearStreamInfo()10017 void RtApi :: clearStreamInfo()
10018 {
10019 stream_.mode = UNINITIALIZED;
10020 stream_.state = STREAM_CLOSED;
10021 stream_.sampleRate = 0;
10022 stream_.bufferSize = 0;
10023 stream_.nBuffers = 0;
10024 stream_.userFormat = 0;
10025 stream_.userInterleaved = true;
10026 stream_.streamTime = 0.0;
10027 stream_.apiHandle = 0;
10028 stream_.deviceBuffer = 0;
10029 stream_.callbackInfo.callback = 0;
10030 stream_.callbackInfo.userData = 0;
10031 stream_.callbackInfo.isRunning = false;
10032 stream_.callbackInfo.errorCallback = 0;
10033 for ( int i=0; i<2; i++ ) {
10034 stream_.device[i] = 11111;
10035 stream_.doConvertBuffer[i] = false;
10036 stream_.deviceInterleaved[i] = true;
10037 stream_.doByteSwap[i] = false;
10038 stream_.nUserChannels[i] = 0;
10039 stream_.nDeviceChannels[i] = 0;
10040 stream_.channelOffset[i] = 0;
10041 stream_.deviceFormat[i] = 0;
10042 stream_.latency[i] = 0;
10043 stream_.userBuffer[i] = 0;
10044 stream_.convertInfo[i].channels = 0;
10045 stream_.convertInfo[i].inJump = 0;
10046 stream_.convertInfo[i].outJump = 0;
10047 stream_.convertInfo[i].inFormat = 0;
10048 stream_.convertInfo[i].outFormat = 0;
10049 stream_.convertInfo[i].inOffset.clear();
10050 stream_.convertInfo[i].outOffset.clear();
10051 }
10052 }
10053
formatBytes(RtAudioFormat format)10054 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10055 {
10056 if ( format == RTAUDIO_SINT16 )
10057 return 2;
10058 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10059 return 4;
10060 else if ( format == RTAUDIO_FLOAT64 )
10061 return 8;
10062 else if ( format == RTAUDIO_SINT24 )
10063 return 3;
10064 else if ( format == RTAUDIO_SINT8 )
10065 return 1;
10066
10067 errorText_ = "RtApi::formatBytes: undefined format.";
10068 error( RtAudioError::WARNING );
10069
10070 return 0;
10071 }
10072
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Populate stream_.convertInfo[mode] with the jumps, formats and
  // per-channel offsets that convertBuffer() will use.  "Jump" is the
  // per-frame stride through a buffer; offsets are per-channel start
  // positions (in samples) within a frame or channel block.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Only convert as many channels as both sides have.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    // One side interleaved, the other not.  The non-interleaved side
    // stores each channel as a contiguous block of bufferSize samples,
    // hence the "k * bufferSize" offsets and a jump of 1.
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      // Both sides interleaved: channel k is sample k within a frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides non-interleaved: channel-block offsets, frame
      // stride of one sample on each side.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // Shift the device-side offsets when the caller requested a starting
  // channel other than zero.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      // Non-interleaved device: skip whole channel blocks.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
10152
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)10153 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10154 {
10155 // This function does format conversion, input/output channel compensation, and
10156 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10157 // the lower three bytes of a 32-bit integer.
10158
10159 // Clear our device buffer when in/out duplex device channels are different
10160 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10161 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10162 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10163
10164 int j;
10165 if (info.outFormat == RTAUDIO_FLOAT64) {
10166 Float64 scale;
10167 Float64 *out = (Float64 *)outBuffer;
10168
10169 if (info.inFormat == RTAUDIO_SINT8) {
10170 signed char *in = (signed char *)inBuffer;
10171 scale = 1.0 / 127.5;
10172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10173 for (j=0; j<info.channels; j++) {
10174 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10175 out[info.outOffset[j]] += 0.5;
10176 out[info.outOffset[j]] *= scale;
10177 }
10178 in += info.inJump;
10179 out += info.outJump;
10180 }
10181 }
10182 else if (info.inFormat == RTAUDIO_SINT16) {
10183 Int16 *in = (Int16 *)inBuffer;
10184 scale = 1.0 / 32767.5;
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10186 for (j=0; j<info.channels; j++) {
10187 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10188 out[info.outOffset[j]] += 0.5;
10189 out[info.outOffset[j]] *= scale;
10190 }
10191 in += info.inJump;
10192 out += info.outJump;
10193 }
10194 }
10195 else if (info.inFormat == RTAUDIO_SINT24) {
10196 Int24 *in = (Int24 *)inBuffer;
10197 scale = 1.0 / 8388607.5;
10198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10199 for (j=0; j<info.channels; j++) {
10200 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10201 out[info.outOffset[j]] += 0.5;
10202 out[info.outOffset[j]] *= scale;
10203 }
10204 in += info.inJump;
10205 out += info.outJump;
10206 }
10207 }
10208 else if (info.inFormat == RTAUDIO_SINT32) {
10209 Int32 *in = (Int32 *)inBuffer;
10210 scale = 1.0 / 2147483647.5;
10211 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10212 for (j=0; j<info.channels; j++) {
10213 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10214 out[info.outOffset[j]] += 0.5;
10215 out[info.outOffset[j]] *= scale;
10216 }
10217 in += info.inJump;
10218 out += info.outJump;
10219 }
10220 }
10221 else if (info.inFormat == RTAUDIO_FLOAT32) {
10222 Float32 *in = (Float32 *)inBuffer;
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10226 }
10227 in += info.inJump;
10228 out += info.outJump;
10229 }
10230 }
10231 else if (info.inFormat == RTAUDIO_FLOAT64) {
10232 // Channel compensation and/or (de)interleaving only.
10233 Float64 *in = (Float64 *)inBuffer;
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = in[info.inOffset[j]];
10237 }
10238 in += info.inJump;
10239 out += info.outJump;
10240 }
10241 }
10242 }
10243 else if (info.outFormat == RTAUDIO_FLOAT32) {
10244 Float32 scale;
10245 Float32 *out = (Float32 *)outBuffer;
10246
10247 if (info.inFormat == RTAUDIO_SINT8) {
10248 signed char *in = (signed char *)inBuffer;
10249 scale = (Float32) ( 1.0 / 127.5 );
10250 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10251 for (j=0; j<info.channels; j++) {
10252 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10253 out[info.outOffset[j]] += 0.5;
10254 out[info.outOffset[j]] *= scale;
10255 }
10256 in += info.inJump;
10257 out += info.outJump;
10258 }
10259 }
10260 else if (info.inFormat == RTAUDIO_SINT16) {
10261 Int16 *in = (Int16 *)inBuffer;
10262 scale = (Float32) ( 1.0 / 32767.5 );
10263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10264 for (j=0; j<info.channels; j++) {
10265 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10266 out[info.outOffset[j]] += 0.5;
10267 out[info.outOffset[j]] *= scale;
10268 }
10269 in += info.inJump;
10270 out += info.outJump;
10271 }
10272 }
10273 else if (info.inFormat == RTAUDIO_SINT24) {
10274 Int24 *in = (Int24 *)inBuffer;
10275 scale = (Float32) ( 1.0 / 8388607.5 );
10276 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10277 for (j=0; j<info.channels; j++) {
10278 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10279 out[info.outOffset[j]] += 0.5;
10280 out[info.outOffset[j]] *= scale;
10281 }
10282 in += info.inJump;
10283 out += info.outJump;
10284 }
10285 }
10286 else if (info.inFormat == RTAUDIO_SINT32) {
10287 Int32 *in = (Int32 *)inBuffer;
10288 scale = (Float32) ( 1.0 / 2147483647.5 );
10289 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10290 for (j=0; j<info.channels; j++) {
10291 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10292 out[info.outOffset[j]] += 0.5;
10293 out[info.outOffset[j]] *= scale;
10294 }
10295 in += info.inJump;
10296 out += info.outJump;
10297 }
10298 }
10299 else if (info.inFormat == RTAUDIO_FLOAT32) {
10300 // Channel compensation and/or (de)interleaving only.
10301 Float32 *in = (Float32 *)inBuffer;
10302 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10303 for (j=0; j<info.channels; j++) {
10304 out[info.outOffset[j]] = in[info.inOffset[j]];
10305 }
10306 in += info.inJump;
10307 out += info.outJump;
10308 }
10309 }
10310 else if (info.inFormat == RTAUDIO_FLOAT64) {
10311 Float64 *in = (Float64 *)inBuffer;
10312 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10313 for (j=0; j<info.channels; j++) {
10314 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10315 }
10316 in += info.inJump;
10317 out += info.outJump;
10318 }
10319 }
10320 }
10321 else if (info.outFormat == RTAUDIO_SINT32) {
10322 Int32 *out = (Int32 *)outBuffer;
10323 if (info.inFormat == RTAUDIO_SINT8) {
10324 signed char *in = (signed char *)inBuffer;
10325 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10326 for (j=0; j<info.channels; j++) {
10327 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10328 out[info.outOffset[j]] <<= 24;
10329 }
10330 in += info.inJump;
10331 out += info.outJump;
10332 }
10333 }
10334 else if (info.inFormat == RTAUDIO_SINT16) {
10335 Int16 *in = (Int16 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10339 out[info.outOffset[j]] <<= 16;
10340 }
10341 in += info.inJump;
10342 out += info.outJump;
10343 }
10344 }
10345 else if (info.inFormat == RTAUDIO_SINT24) {
10346 Int24 *in = (Int24 *)inBuffer;
10347 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10348 for (j=0; j<info.channels; j++) {
10349 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10350 out[info.outOffset[j]] <<= 8;
10351 }
10352 in += info.inJump;
10353 out += info.outJump;
10354 }
10355 }
10356 else if (info.inFormat == RTAUDIO_SINT32) {
10357 // Channel compensation and/or (de)interleaving only.
10358 Int32 *in = (Int32 *)inBuffer;
10359 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10360 for (j=0; j<info.channels; j++) {
10361 out[info.outOffset[j]] = in[info.inOffset[j]];
10362 }
10363 in += info.inJump;
10364 out += info.outJump;
10365 }
10366 }
10367 else if (info.inFormat == RTAUDIO_FLOAT32) {
10368 Float32 *in = (Float32 *)inBuffer;
10369 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10370 for (j=0; j<info.channels; j++) {
10371 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10372 }
10373 in += info.inJump;
10374 out += info.outJump;
10375 }
10376 }
10377 else if (info.inFormat == RTAUDIO_FLOAT64) {
10378 Float64 *in = (Float64 *)inBuffer;
10379 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10380 for (j=0; j<info.channels; j++) {
10381 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10382 }
10383 in += info.inJump;
10384 out += info.outJump;
10385 }
10386 }
10387 }
10388 else if (info.outFormat == RTAUDIO_SINT24) {
10389 Int24 *out = (Int24 *)outBuffer;
10390 if (info.inFormat == RTAUDIO_SINT8) {
10391 signed char *in = (signed char *)inBuffer;
10392 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10393 for (j=0; j<info.channels; j++) {
10394 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10395 //out[info.outOffset[j]] <<= 16;
10396 }
10397 in += info.inJump;
10398 out += info.outJump;
10399 }
10400 }
10401 else if (info.inFormat == RTAUDIO_SINT16) {
10402 Int16 *in = (Int16 *)inBuffer;
10403 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10404 for (j=0; j<info.channels; j++) {
10405 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10406 //out[info.outOffset[j]] <<= 8;
10407 }
10408 in += info.inJump;
10409 out += info.outJump;
10410 }
10411 }
10412 else if (info.inFormat == RTAUDIO_SINT24) {
10413 // Channel compensation and/or (de)interleaving only.
10414 Int24 *in = (Int24 *)inBuffer;
10415 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10416 for (j=0; j<info.channels; j++) {
10417 out[info.outOffset[j]] = in[info.inOffset[j]];
10418 }
10419 in += info.inJump;
10420 out += info.outJump;
10421 }
10422 }
10423 else if (info.inFormat == RTAUDIO_SINT32) {
10424 Int32 *in = (Int32 *)inBuffer;
10425 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10426 for (j=0; j<info.channels; j++) {
10427 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10428 //out[info.outOffset[j]] >>= 8;
10429 }
10430 in += info.inJump;
10431 out += info.outJump;
10432 }
10433 }
10434 else if (info.inFormat == RTAUDIO_FLOAT32) {
10435 Float32 *in = (Float32 *)inBuffer;
10436 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10437 for (j=0; j<info.channels; j++) {
10438 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10439 }
10440 in += info.inJump;
10441 out += info.outJump;
10442 }
10443 }
10444 else if (info.inFormat == RTAUDIO_FLOAT64) {
10445 Float64 *in = (Float64 *)inBuffer;
10446 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10447 for (j=0; j<info.channels; j++) {
10448 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10449 }
10450 in += info.inJump;
10451 out += info.outJump;
10452 }
10453 }
10454 }
10455 else if (info.outFormat == RTAUDIO_SINT16) {
10456 Int16 *out = (Int16 *)outBuffer;
10457 if (info.inFormat == RTAUDIO_SINT8) {
10458 signed char *in = (signed char *)inBuffer;
10459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10460 for (j=0; j<info.channels; j++) {
10461 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10462 out[info.outOffset[j]] <<= 8;
10463 }
10464 in += info.inJump;
10465 out += info.outJump;
10466 }
10467 }
10468 else if (info.inFormat == RTAUDIO_SINT16) {
10469 // Channel compensation and/or (de)interleaving only.
10470 Int16 *in = (Int16 *)inBuffer;
10471 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10472 for (j=0; j<info.channels; j++) {
10473 out[info.outOffset[j]] = in[info.inOffset[j]];
10474 }
10475 in += info.inJump;
10476 out += info.outJump;
10477 }
10478 }
10479 else if (info.inFormat == RTAUDIO_SINT24) {
10480 Int24 *in = (Int24 *)inBuffer;
10481 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10482 for (j=0; j<info.channels; j++) {
10483 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10484 }
10485 in += info.inJump;
10486 out += info.outJump;
10487 }
10488 }
10489 else if (info.inFormat == RTAUDIO_SINT32) {
10490 Int32 *in = (Int32 *)inBuffer;
10491 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10492 for (j=0; j<info.channels; j++) {
10493 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10494 }
10495 in += info.inJump;
10496 out += info.outJump;
10497 }
10498 }
10499 else if (info.inFormat == RTAUDIO_FLOAT32) {
10500 Float32 *in = (Float32 *)inBuffer;
10501 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10502 for (j=0; j<info.channels; j++) {
10503 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10504 }
10505 in += info.inJump;
10506 out += info.outJump;
10507 }
10508 }
10509 else if (info.inFormat == RTAUDIO_FLOAT64) {
10510 Float64 *in = (Float64 *)inBuffer;
10511 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10512 for (j=0; j<info.channels; j++) {
10513 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10514 }
10515 in += info.inJump;
10516 out += info.outJump;
10517 }
10518 }
10519 }
10520 else if (info.outFormat == RTAUDIO_SINT8) {
10521 signed char *out = (signed char *)outBuffer;
10522 if (info.inFormat == RTAUDIO_SINT8) {
10523 // Channel compensation and/or (de)interleaving only.
10524 signed char *in = (signed char *)inBuffer;
10525 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10526 for (j=0; j<info.channels; j++) {
10527 out[info.outOffset[j]] = in[info.inOffset[j]];
10528 }
10529 in += info.inJump;
10530 out += info.outJump;
10531 }
10532 }
10533 if (info.inFormat == RTAUDIO_SINT16) {
10534 Int16 *in = (Int16 *)inBuffer;
10535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10536 for (j=0; j<info.channels; j++) {
10537 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10538 }
10539 in += info.inJump;
10540 out += info.outJump;
10541 }
10542 }
10543 else if (info.inFormat == RTAUDIO_SINT24) {
10544 Int24 *in = (Int24 *)inBuffer;
10545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10546 for (j=0; j<info.channels; j++) {
10547 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10548 }
10549 in += info.inJump;
10550 out += info.outJump;
10551 }
10552 }
10553 else if (info.inFormat == RTAUDIO_SINT32) {
10554 Int32 *in = (Int32 *)inBuffer;
10555 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10556 for (j=0; j<info.channels; j++) {
10557 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10558 }
10559 in += info.inJump;
10560 out += info.outJump;
10561 }
10562 }
10563 else if (info.inFormat == RTAUDIO_FLOAT32) {
10564 Float32 *in = (Float32 *)inBuffer;
10565 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10566 for (j=0; j<info.channels; j++) {
10567 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10568 }
10569 in += info.inJump;
10570 out += info.outJump;
10571 }
10572 }
10573 else if (info.inFormat == RTAUDIO_FLOAT64) {
10574 Float64 *in = (Float64 *)inBuffer;
10575 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10576 for (j=0; j<info.channels; j++) {
10577 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10578 }
10579 in += info.inJump;
10580 out += info.outJump;
10581 }
10582 }
10583 }
10584 }
10585
10586 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10587 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10588 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10589
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)10590 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10591 {
10592 char val;
10593 char *ptr;
10594
10595 ptr = buffer;
10596 if ( format == RTAUDIO_SINT16 ) {
10597 for ( unsigned int i=0; i<samples; i++ ) {
10598 // Swap 1st and 2nd bytes.
10599 val = *(ptr);
10600 *(ptr) = *(ptr+1);
10601 *(ptr+1) = val;
10602
10603 // Increment 2 bytes.
10604 ptr += 2;
10605 }
10606 }
10607 else if ( format == RTAUDIO_SINT32 ||
10608 format == RTAUDIO_FLOAT32 ) {
10609 for ( unsigned int i=0; i<samples; i++ ) {
10610 // Swap 1st and 4th bytes.
10611 val = *(ptr);
10612 *(ptr) = *(ptr+3);
10613 *(ptr+3) = val;
10614
10615 // Swap 2nd and 3rd bytes.
10616 ptr += 1;
10617 val = *(ptr);
10618 *(ptr) = *(ptr+1);
10619 *(ptr+1) = val;
10620
10621 // Increment 3 more bytes.
10622 ptr += 3;
10623 }
10624 }
10625 else if ( format == RTAUDIO_SINT24 ) {
10626 for ( unsigned int i=0; i<samples; i++ ) {
10627 // Swap 1st and 3rd bytes.
10628 val = *(ptr);
10629 *(ptr) = *(ptr+2);
10630 *(ptr+2) = val;
10631
10632 // Increment 2 more bytes.
10633 ptr += 2;
10634 }
10635 }
10636 else if ( format == RTAUDIO_FLOAT64 ) {
10637 for ( unsigned int i=0; i<samples; i++ ) {
10638 // Swap 1st and 8th bytes
10639 val = *(ptr);
10640 *(ptr) = *(ptr+7);
10641 *(ptr+7) = val;
10642
10643 // Swap 2nd and 7th bytes
10644 ptr += 1;
10645 val = *(ptr);
10646 *(ptr) = *(ptr+5);
10647 *(ptr+5) = val;
10648
10649 // Swap 3rd and 6th bytes
10650 ptr += 1;
10651 val = *(ptr);
10652 *(ptr) = *(ptr+3);
10653 *(ptr+3) = val;
10654
10655 // Swap 4th and 5th bytes
10656 ptr += 1;
10657 val = *(ptr);
10658 *(ptr) = *(ptr+1);
10659 *(ptr+1) = val;
10660
10661 // Increment 5 more bytes.
10662 ptr += 5;
10663 }
10664 }
10665 }
10666
10667 // Indentation settings for Vim and Emacs
10668 //
10669 // Local Variables:
10670 // c-basic-offset: 2
10671 // indent-tabs-mode: nil
10672 // End:
10673 //
10674 // vim: et sts=2 sw=2
10675
10676