1 /************************************************************************/
2 /*! \class RtAudio
3     \brief Realtime audio i/o C++ classes.
4 
5     RtAudio provides a common API (Application Programming Interface)
6     for realtime audio input/output across Linux (native ALSA, Jack,
7     and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8     (DirectSound, ASIO and WASAPI) operating systems.
9 
10     RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11 
12     RtAudio: realtime audio i/o C++ classes
13     Copyright (c) 2001-2016 Gary P. Scavone
14 
15     Permission is hereby granted, free of charge, to any person
16     obtaining a copy of this software and associated documentation files
17     (the "Software"), to deal in the Software without restriction,
18     including without limitation the rights to use, copy, modify, merge,
19     publish, distribute, sublicense, and/or sell copies of the Software,
20     and to permit persons to whom the Software is furnished to do so,
21     subject to the following conditions:
22 
23     The above copyright notice and this permission notice shall be
24     included in all copies or substantial portions of the Software.
25 
26     Any person wishing to distribute modifications to the Software is
27     asked to send the modifications to the original developer so that
28     they can be incorporated into the canonical version.  This is,
29     however, not a binding provision of this license.
30 
31     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33     MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34     IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35     ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36     CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37     WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39 /************************************************************************/
40 
41 // RtAudio: Version 4.1.2
42 
#include "RtAudio.h"

#include <algorithm>
#include <climits>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
49 
50 // Static variable definitions.
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
52 const unsigned int RtApi::SAMPLE_RATES[] = {
53 	4000, 5512, 8000, 9600, 11025, 16000, 22050,
54 	32000, 44100, 48000, 88200, 96000, 176400, 192000};
55 
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows: map the portable MUTEX_* macros onto critical sections.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

#include "tchar.h"

// Identity overload: narrow device-name strings need no conversion.
static std::string convertCharPointerToStdString(const char *text)
{
	return std::string(text);
}

// Convert a NUL-terminated wide string to a UTF-8 encoded std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
	// First call computes the required buffer size, including the
	// terminating NUL.  WideCharToMultiByte returns 0 on failure (e.g.
	// a NULL input pointer); bail out in that case rather than
	// constructing std::string(length - 1, ...) with an underflowed
	// (huge) length.
	int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
	if (length <= 0) return std::string();
	std::string s(length - 1, '\0');
	WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
	return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
// No realtime API compiled in (__RTAUDIO_DUMMY__ builds): only
// INITIALIZE/DESTROY are referenced by the base class, so LOCK/UNLOCK
// are intentionally left undefined here.
#define MUTEX_INITIALIZE(A) abs(*A)  // dummy definitions
#define MUTEX_DESTROY(A) abs(*A)     // dummy definitions
#endif
87 
88 // *************************************************** //
89 //
90 // RtAudio definitions.
91 //
92 // *************************************************** //
93 
getVersion(void)94 std::string RtAudio ::getVersion(void) throw()
95 {
96 	return RTAUDIO_VERSION;
97 }
98 
// Fill 'apis' with the set of audio APIs compiled into this library.
// The vector is cleared first, so the result reflects compile-time
// support only; it says nothing about devices being available at
// runtime.
void RtAudio ::getCompiledApi(std::vector<RtAudio::Api> &apis) throw()
{
	apis.clear();

	// The order here will control the order of RtAudio's API search in
	// the constructor.
#if defined(__UNIX_JACK__)
	apis.push_back(UNIX_JACK);
#endif
#if defined(__LINUX_ALSA__)
	apis.push_back(LINUX_ALSA);
#endif
#if defined(__LINUX_PULSE__)
	apis.push_back(LINUX_PULSE);
#endif
#if defined(__LINUX_OSS__)
	apis.push_back(LINUX_OSS);
#endif
#if defined(__WINDOWS_ASIO__)
	apis.push_back(WINDOWS_ASIO);
#endif
#if defined(__WINDOWS_WASAPI__)
	apis.push_back(WINDOWS_WASAPI);
#endif
#if defined(__WINDOWS_DS__)
	apis.push_back(WINDOWS_DS);
#endif
#if defined(__MACOSX_CORE__)
	apis.push_back(MACOSX_CORE);
#endif
#if defined(__RTAUDIO_DUMMY__)
	apis.push_back(RTAUDIO_DUMMY);
#endif
}
133 
// Replace the current API instance (if any) with a new instance of the
// requested API.  If the requested API was not compiled in, rtapi_ is
// left null -- callers test rtapi_ afterwards to detect this.
void RtAudio ::openRtApi(RtAudio::Api api)
{
	// Dispose of any previously selected API implementation first.
	if (rtapi_)
		delete rtapi_;
	rtapi_ = 0;

#if defined(__UNIX_JACK__)
	if (api == UNIX_JACK)
		rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
	if (api == LINUX_ALSA)
		rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
	if (api == LINUX_PULSE)
		rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
	if (api == LINUX_OSS)
		rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
	if (api == WINDOWS_ASIO)
		rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
	if (api == WINDOWS_WASAPI)
		rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
	if (api == WINDOWS_DS)
		rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
	if (api == MACOSX_CORE)
		rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
	if (api == RTAUDIO_DUMMY)
		rtapi_ = new RtApiDummy();
#endif
}
177 
// Construct an RtAudio instance, selecting an API implementation.  A
// specific API may be requested; otherwise (or if the request cannot
// be satisfied) the compiled APIs are probed in getCompiledApi order.
RtAudio ::RtAudio(RtAudio::Api api)
{
	rtapi_ = 0;

	if (api != UNSPECIFIED)
	{
		// Attempt to open the specified API.
		openRtApi(api);
		if (rtapi_) return;

		// No compiled support for specified API value.  Issue a debug
		// warning and continue as if no API was specified.
		std::cerr << "\nRtAudio: no compiled support for specified API argument!\n"
				  << std::endl;
	}

	// Iterate through the compiled APIs and return as soon as we find
	// one with at least one device or we reach the end of the list.
	// Note: if no API reports a device, rtapi_ is left holding the
	// last API probed (still usable, just device-less).
	std::vector<RtAudio::Api> apis;
	getCompiledApi(apis);
	for (unsigned int i = 0; i < apis.size(); i++)
	{
		openRtApi(apis[i]);
		if (rtapi_ && rtapi_->getDeviceCount()) break;
	}

	if (rtapi_) return;

	// It should not be possible to get here because the preprocessor
	// definition __RTAUDIO_DUMMY__ is automatically defined if no
	// API-specific definitions are passed to the compiler. But just in
	// case something weird happens, we'll throw an error.
	std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
	throw(RtAudioError(errorText, RtAudioError::UNSPECIFIED));
}
213 
~RtAudio()214 RtAudio ::~RtAudio() throw()
215 {
216 	if (rtapi_)
217 		delete rtapi_;
218 }
219 
openStream(RtAudio::StreamParameters * outputParameters,RtAudio::StreamParameters * inputParameters,RtAudioFormat format,unsigned int sampleRate,unsigned int * bufferFrames,RtAudioCallback callback,void * userData,RtAudio::StreamOptions * options,RtAudioErrorCallback errorCallback)220 void RtAudio ::openStream(RtAudio::StreamParameters *outputParameters,
221 						  RtAudio::StreamParameters *inputParameters,
222 						  RtAudioFormat format, unsigned int sampleRate,
223 						  unsigned int *bufferFrames,
224 						  RtAudioCallback callback, void *userData,
225 						  RtAudio::StreamOptions *options,
226 						  RtAudioErrorCallback errorCallback)
227 {
228 	return rtapi_->openStream(outputParameters, inputParameters, format,
229 							  sampleRate, bufferFrames, callback,
230 							  userData, options, errorCallback);
231 }
232 
233 // *************************************************** //
234 //
235 // Public RtApi definitions (see end of file for
236 // private or protected utility functions).
237 //
238 // *************************************************** //
239 
// Base-class constructor: mark the stream closed/uninitialized and
// create the stream mutex.  MUTEX_INITIALIZE maps to the platform's
// native mutex API (or a dummy when no realtime API is compiled in).
RtApi ::RtApi()
{
	stream_.state = STREAM_CLOSED;
	stream_.mode = UNINITIALIZED;
	stream_.apiHandle = 0;
	// Per-direction user conversion buffers start out null; they are
	// allocated later by API-specific code as needed.
	stream_.userBuffer[0] = 0;
	stream_.userBuffer[1] = 0;
	MUTEX_INITIALIZE(&stream_.mutex);
	showWarnings_ = true;
	firstErrorOccurred_ = false;
}
251 
// Base-class destructor: destroy the stream mutex.  Subclasses are
// responsible for closing any open stream before this runs.
RtApi ::~RtApi()
{
	MUTEX_DESTROY(&stream_.mutex);
}
256 
// Validate the user-supplied stream parameters, then delegate device
// setup to the API-specific probeDeviceOpen().  On any validation or
// probe failure error() is invoked (which may warn or throw, depending
// on the error type) and the stream remains closed.  On success the
// stream is left in the STREAM_STOPPED state.
void RtApi ::openStream(RtAudio::StreamParameters *oParams,
						RtAudio::StreamParameters *iParams,
						RtAudioFormat format, unsigned int sampleRate,
						unsigned int *bufferFrames,
						RtAudioCallback callback, void *userData,
						RtAudio::StreamOptions *options,
						RtAudioErrorCallback errorCallback)
{
	// Only one stream may be open per RtApi instance.
	if (stream_.state != STREAM_CLOSED)
	{
		errorText_ = "RtApi::openStream: a stream is already open!";
		error(RtAudioError::INVALID_USE);
		return;
	}

	// Clear stream information potentially left from a previously open stream.
	clearStreamInfo();

	if (oParams && oParams->nChannels < 1)
	{
		errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
		error(RtAudioError::INVALID_USE);
		return;
	}

	if (iParams && iParams->nChannels < 1)
	{
		errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
		error(RtAudioError::INVALID_USE);
		return;
	}

	// At least one direction (input or output) must be requested.
	if (oParams == NULL && iParams == NULL)
	{
		errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
		error(RtAudioError::INVALID_USE);
		return;
	}

	// formatBytes() returns 0 for unknown format flags.
	if (formatBytes(format) == 0)
	{
		errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
		error(RtAudioError::INVALID_USE);
		return;
	}

	// Range-check the requested device indices against the current
	// device count.
	unsigned int nDevices = getDeviceCount();
	unsigned int oChannels = 0;
	if (oParams)
	{
		oChannels = oParams->nChannels;
		if (oParams->deviceId >= nDevices)
		{
			errorText_ = "RtApi::openStream: output device parameter value is invalid.";
			error(RtAudioError::INVALID_USE);
			return;
		}
	}

	unsigned int iChannels = 0;
	if (iParams)
	{
		iChannels = iParams->nChannels;
		if (iParams->deviceId >= nDevices)
		{
			errorText_ = "RtApi::openStream: input device parameter value is invalid.";
			error(RtAudioError::INVALID_USE);
			return;
		}
	}

	bool result;

	// Probe/open the output side first, then the input side.
	if (oChannels > 0)
	{
		result = probeDeviceOpen(oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
								 sampleRate, format, bufferFrames, options);
		if (result == false)
		{
			error(RtAudioError::SYSTEM_ERROR);
			return;
		}
	}

	if (iChannels > 0)
	{
		result = probeDeviceOpen(iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
								 sampleRate, format, bufferFrames, options);
		if (result == false)
		{
			// Undo the successful output-side open before reporting.
			if (oChannels > 0) closeStream();
			error(RtAudioError::SYSTEM_ERROR);
			return;
		}
	}

	stream_.callbackInfo.callback = (void *)callback;
	stream_.callbackInfo.userData = userData;
	stream_.callbackInfo.errorCallback = (void *)errorCallback;

	// Report back the actual number of buffers chosen by the API.
	if (options) options->numberOfBuffers = stream_.nBuffers;
	stream_.state = STREAM_STOPPED;
}
360 
// Default (base-class) implementation: report device 0 as the default
// input device.  APIs that can query a real default override this.
unsigned int RtApi ::getDefaultInputDevice(void)
{
	// Should be implemented in subclasses if possible.
	return 0;
}
366 
// Default (base-class) implementation: report device 0 as the default
// output device.  APIs that can query a real default override this.
unsigned int RtApi ::getDefaultOutputDevice(void)
{
	// Should be implemented in subclasses if possible.
	return 0;
}
372 
// Base-class stub: closing a stream is entirely API-specific, so this
// does nothing.
void RtApi ::closeStream(void)
{
	// MUST be implemented in subclasses!
	return;
}
378 
// Base-class stub: always fails.  Each API subclass implements the
// actual device probing/opening here.
bool RtApi ::probeDeviceOpen(unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
							 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
							 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
							 RtAudio::StreamOptions * /*options*/)
{
	// MUST be implemented in subclasses!
	return FAILURE;
}
387 
tickStreamTime(void)388 void RtApi ::tickStreamTime(void)
389 {
390 	// Subclasses that do not provide their own implementation of
391 	// getStreamTime should call this function once per buffer I/O to
392 	// provide basic stream time support.
393 
394 	stream_.streamTime += (stream_.bufferSize * 1.0 / stream_.sampleRate);
395 
396 #if defined(HAVE_GETTIMEOFDAY)
397 	gettimeofday(&stream_.lastTickTimestamp, NULL);
398 #endif
399 }
400 
getStreamLatency(void)401 long RtApi ::getStreamLatency(void)
402 {
403 	verifyStream();
404 
405 	long totalLatency = 0;
406 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
407 		totalLatency = stream_.latency[0];
408 	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
409 		totalLatency += stream_.latency[1];
410 
411 	return totalLatency;
412 }
413 
// Return the current stream time in seconds.  When gettimeofday() is
// available, the value is interpolated by adding the wall-clock time
// elapsed since the last tickStreamTime() call.
double RtApi ::getStreamTime(void)
{
	verifyStream();

#if defined(HAVE_GETTIMEOFDAY)
	// Return a very accurate estimate of the stream time by
	// adding in the elapsed time since the last tick.
	struct timeval then;
	struct timeval now;

	// If the stream is not running, or has not ticked yet (streamTime
	// still 0.0, so lastTickTimestamp may be meaningless), return the
	// raw stream time without interpolation.
	if (stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0)
		return stream_.streamTime;

	gettimeofday(&now, NULL);
	then = stream_.lastTickTimestamp;
	return stream_.streamTime +
		   ((now.tv_sec + 0.000001 * now.tv_usec) -
			(then.tv_sec + 0.000001 * then.tv_usec));
#else
	return stream_.streamTime;
#endif
}
436 
setStreamTime(double time)437 void RtApi ::setStreamTime(double time)
438 {
439 	verifyStream();
440 
441 	if (time >= 0.0)
442 		stream_.streamTime = time;
443 }
444 
// Return the sample rate of the currently open stream (verifyStream()
// reports an error if no stream is open).
unsigned int RtApi ::getStreamSampleRate(void)
{
	verifyStream();

	return stream_.sampleRate;
}
451 
452 // *************************************************** //
453 //
454 // OS/API-specific methods.
455 //
456 // *************************************************** //
457 
458 #if defined(__MACOSX_CORE__)
459 
460 // The OS X CoreAudio API is designed to use a separate callback
461 // procedure for each of its audio devices.  A single RtAudio duplex
462 // stream using two different devices is supported here, though it
463 // cannot be guaranteed to always behave correctly because we cannot
464 // synchronize these two callbacks.
465 //
466 // A property listener is installed for over/underrun information.
467 // However, no functionality is currently provided to allow property
468 // listeners to trigger user handlers because it is unclear what could
469 // be done if a critical stream parameter (buffer size, sample rate,
470 // device disconnect) notification arrived.  The listeners entail
471 // quite a bit of extra code and most likely, a user program wouldn't
472 // be prepared for the result anyway.  However, we do provide a flag
473 // to the client callback function to inform of an over/underrun.
474 
475 // A structure to hold various information related to the CoreAudio API
476 // implementation.
477 struct CoreHandle
478 {
479 	AudioDeviceID id[2];  // device ids
480 #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
481 	AudioDeviceIOProcID procId[2];
482 #endif
483 	UInt32 iStream[2];   // device stream index (or first if using multiple)
484 	UInt32 nStreams[2];  // number of streams to use
485 	bool xrun[2];
486 	char *deviceBuffer;
487 	pthread_cond_t condition;
488 	int drainCounter;    // Tracks callback counts when draining
489 	bool internalDrain;  // Indicates if stop is initiated from callback or not.
490 
CoreHandleCoreHandle491 	CoreHandle()
492 		: deviceBuffer(0), drainCounter(0), internalDrain(false)
493 	{
494 		nStreams[0] = 1;
495 		nStreams[1] = 1;
496 		id[0] = 0;
497 		id[1] = 0;
498 		xrun[0] = false;
499 		xrun[1] = false;
500 	}
501 };
502 
// CoreAudio API constructor.
RtApiCore::RtApiCore()
{
#if defined(AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER)
	// This is a largely undocumented but absolutely necessary
	// requirement starting with OS-X 10.6.  If not called, queries and
	// updates to various audio device properties are not handled
	// correctly.  Setting the run-loop property to NULL instructs
	// CoreAudio to dispatch notifications on its own thread.
	CFRunLoopRef theRunLoop = NULL;
	AudioObjectPropertyAddress property = {kAudioHardwarePropertyRunLoop,
										   kAudioObjectPropertyScopeGlobal,
										   kAudioObjectPropertyElementMaster};
	OSStatus result = AudioObjectSetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
	if (result != noErr)
	{
		// Non-fatal: report as a warning and continue.
		errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
		error(RtAudioError::WARNING);
	}
#endif
}
522 
// CoreAudio API destructor.
RtApiCore ::~RtApiCore()
{
	// The subclass destructor gets called before the base class
	// destructor, so close an existing stream before deallocating
	// apiDeviceId memory.
	if (stream_.state != STREAM_CLOSED) closeStream();
}
530 
getDeviceCount(void)531 unsigned int RtApiCore ::getDeviceCount(void)
532 {
533 	// Find out how many audio devices there are, if any.
534 	UInt32 dataSize;
535 	AudioObjectPropertyAddress propertyAddress = {kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
536 	OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize);
537 	if (result != noErr)
538 	{
539 		errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
540 		error(RtAudioError::WARNING);
541 		return 0;
542 	}
543 
544 	return dataSize / sizeof(AudioDeviceID);
545 }
546 
getDefaultInputDevice(void)547 unsigned int RtApiCore ::getDefaultInputDevice(void)
548 {
549 	unsigned int nDevices = getDeviceCount();
550 	if (nDevices <= 1) return 0;
551 
552 	AudioDeviceID id;
553 	UInt32 dataSize = sizeof(AudioDeviceID);
554 	AudioObjectPropertyAddress property = {kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
555 	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id);
556 	if (result != noErr)
557 	{
558 		errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
559 		error(RtAudioError::WARNING);
560 		return 0;
561 	}
562 
563 	dataSize *= nDevices;
564 	AudioDeviceID deviceList[nDevices];
565 	property.mSelector = kAudioHardwarePropertyDevices;
566 	result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *)&deviceList);
567 	if (result != noErr)
568 	{
569 		errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
570 		error(RtAudioError::WARNING);
571 		return 0;
572 	}
573 
574 	for (unsigned int i = 0; i < nDevices; i++)
575 		if (id == deviceList[i]) return i;
576 
577 	errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
578 	error(RtAudioError::WARNING);
579 	return 0;
580 }
581 
getDefaultOutputDevice(void)582 unsigned int RtApiCore ::getDefaultOutputDevice(void)
583 {
584 	unsigned int nDevices = getDeviceCount();
585 	if (nDevices <= 1) return 0;
586 
587 	AudioDeviceID id;
588 	UInt32 dataSize = sizeof(AudioDeviceID);
589 	AudioObjectPropertyAddress property = {kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
590 	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id);
591 	if (result != noErr)
592 	{
593 		errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
594 		error(RtAudioError::WARNING);
595 		return 0;
596 	}
597 
598 	dataSize = sizeof(AudioDeviceID) * nDevices;
599 	AudioDeviceID deviceList[nDevices];
600 	property.mSelector = kAudioHardwarePropertyDevices;
601 	result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *)&deviceList);
602 	if (result != noErr)
603 	{
604 		errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
605 		error(RtAudioError::WARNING);
606 		return 0;
607 	}
608 
609 	for (unsigned int i = 0; i < nDevices; i++)
610 		if (id == deviceList[i]) return i;
611 
612 	errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
613 	error(RtAudioError::WARNING);
614 	return 0;
615 }
616 
// Probe and return capability information (name, channel counts,
// supported sample rates, native format) for the device at the given
// index.  On any failure a warning is issued and the partially filled
// info structure is returned with info.probed still false.
RtAudio::DeviceInfo RtApiCore ::getDeviceInfo(unsigned int device)
{
	RtAudio::DeviceInfo info;
	info.probed = false;

	// Get device ID
	unsigned int nDevices = getDeviceCount();
	if (nDevices == 0)
	{
		errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
		error(RtAudioError::INVALID_USE);
		return info;
	}

	if (device >= nDevices)
	{
		errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
		error(RtAudioError::INVALID_USE);
		return info;
	}

	// NOTE(review): variable-length array is a compiler extension, not
	// standard C++; consider std::vector here.
	AudioDeviceID deviceList[nDevices];
	UInt32 dataSize = sizeof(AudioDeviceID) * nDevices;
	AudioObjectPropertyAddress property = {kAudioHardwarePropertyDevices,
										   kAudioObjectPropertyScopeGlobal,
										   kAudioObjectPropertyElementMaster};
	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property,
												 0, NULL, &dataSize, (void *)&deviceList);
	if (result != noErr)
	{
		errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
		error(RtAudioError::WARNING);
		return info;
	}

	AudioDeviceID id = deviceList[device];

	// Get the device name.  The name is built as "<manufacturer>: <name>".
	info.name.erase();
	CFStringRef cfname;
	dataSize = sizeof(CFStringRef);
	property.mSelector = kAudioObjectPropertyManufacturer;
	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &cfname);
	if (result != noErr)
	{
		errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode(result) << ") getting device manufacturer.";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
	// length*3+1 sizes the buffer for the conversion below -- presumably
	// a worst-case byte-per-character expansion; TODO confirm it covers
	// all encodings used here.
	int length = CFStringGetLength(cfname);
	char *mname = (char *)malloc(length * 3 + 1);
#if defined(UNICODE) || defined(_UNICODE)
	CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
	CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
	info.name.append((const char *)mname, strlen(mname));
	info.name.append(": ");
	CFRelease(cfname);
	free(mname);

	property.mSelector = kAudioObjectPropertyName;
	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &cfname);
	if (result != noErr)
	{
		errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode(result) << ") getting device name.";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
	length = CFStringGetLength(cfname);
	char *name = (char *)malloc(length * 3 + 1);
#if defined(UNICODE) || defined(_UNICODE)
	CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
	CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
	info.name.append((const char *)name, strlen(name));
	CFRelease(cfname);
	free(name);

	// Get the output stream "configuration".
	AudioBufferList *bufferList = nil;
	property.mSelector = kAudioDevicePropertyStreamConfiguration;
	property.mScope = kAudioDevicePropertyScopeOutput;
	//  property.mElement = kAudioObjectPropertyElementWildcard;
	dataSize = 0;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, NULL, &dataSize);
	if (result != noErr || dataSize == 0)
	{
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting output stream configuration info for device (" << device << ").";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *)malloc(dataSize);
	if (bufferList == NULL)
	{
		errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
		error(RtAudioError::WARNING);
		return info;
	}

	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, bufferList);
	if (result != noErr || dataSize == 0)
	{
		free(bufferList);
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting output stream configuration for device (" << device << ").";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// Get output channel information.  The total channel count is the
	// sum over all of the device's output streams.
	unsigned int i, nStreams = bufferList->mNumberBuffers;
	for (i = 0; i < nStreams; i++)
		info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
	free(bufferList);

	// Get the input stream "configuration".
	property.mScope = kAudioDevicePropertyScopeInput;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, NULL, &dataSize);
	if (result != noErr || dataSize == 0)
	{
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting input stream configuration info for device (" << device << ").";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *)malloc(dataSize);
	if (bufferList == NULL)
	{
		errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
		error(RtAudioError::WARNING);
		return info;
	}

	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, bufferList);
	if (result != noErr || dataSize == 0)
	{
		free(bufferList);
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting input stream configuration for device (" << device << ").";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// Get input channel information.
	nStreams = bufferList->mNumberBuffers;
	for (i = 0; i < nStreams; i++)
		info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
	free(bufferList);

	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0)
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

	// Probe the device sample rates.
	bool isInput = false;
	if (info.outputChannels == 0) isInput = true;

	// Determine the supported sample rates.
	property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
	if (isInput == false) property.mScope = kAudioDevicePropertyScopeOutput;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, NULL, &dataSize);
	if (result != kAudioHardwareNoError || dataSize == 0)
	{
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting sample rate info.";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// NOTE(review): another variable-length array (compiler extension).
	UInt32 nRanges = dataSize / sizeof(AudioValueRange);
	AudioValueRange rangeList[nRanges];
	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &rangeList);
	if (result != kAudioHardwareNoError)
	{
		errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode(result) << ") getting sample rates.";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// The sample rate reporting mechanism is a bit of a mystery.  It
	// seems that it can either return individual rates or a range of
	// rates.  I assume that if the min / max range values are the same,
	// then that represents a single supported rate and if the min / max
	// range values are different, the device supports an arbitrary
	// range of values (though there might be multiple ranges, so we'll
	// use the most conservative range).
	Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
	bool haveValueRange = false;
	info.sampleRates.clear();
	for (UInt32 i = 0; i < nRanges; i++)
	{
		if (rangeList[i].mMinimum == rangeList[i].mMaximum)
		{
			unsigned int tmpSr = (unsigned int)rangeList[i].mMinimum;
			info.sampleRates.push_back(tmpSr);

			// Prefer the highest supported rate that does not exceed
			// 48 kHz.
			if (!info.preferredSampleRate || (tmpSr <= 48000 && tmpSr > info.preferredSampleRate))
				info.preferredSampleRate = tmpSr;
		}
		else
		{
			haveValueRange = true;
			if (rangeList[i].mMinimum > minimumRate) minimumRate = rangeList[i].mMinimum;
			if (rangeList[i].mMaximum < maximumRate) maximumRate = rangeList[i].mMaximum;
		}
	}

	// For a continuous range, report the standard rates that fall
	// within it.
	if (haveValueRange)
	{
		for (unsigned int k = 0; k < MAX_SAMPLE_RATES; k++)
		{
			if (SAMPLE_RATES[k] >= (unsigned int)minimumRate && SAMPLE_RATES[k] <= (unsigned int)maximumRate)
			{
				info.sampleRates.push_back(SAMPLE_RATES[k]);

				if (!info.preferredSampleRate || (SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate))
					info.preferredSampleRate = SAMPLE_RATES[k];
			}
		}
	}

	// Sort and remove any redundant values
	std::sort(info.sampleRates.begin(), info.sampleRates.end());
	info.sampleRates.erase(unique(info.sampleRates.begin(), info.sampleRates.end()), info.sampleRates.end());

	if (info.sampleRates.size() == 0)
	{
		errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// CoreAudio always uses 32-bit floating point data for PCM streams.
	// Thus, any other "physical" formats supported by the device are of
	// no interest to the client.
	info.nativeFormats = RTAUDIO_FLOAT32;

	if (info.outputChannels > 0)
		if (getDefaultOutputDevice() == device) info.isDefaultOutput = true;
	if (info.inputChannels > 0)
		if (getDefaultInputDevice() == device) info.isDefaultInput = true;

	info.probed = true;
	return info;
}
877 
callbackHandler(AudioDeviceID inDevice,const AudioTimeStamp *,const AudioBufferList * inInputData,const AudioTimeStamp *,AudioBufferList * outOutputData,const AudioTimeStamp *,void * infoPointer)878 static OSStatus callbackHandler(AudioDeviceID inDevice,
879 								const AudioTimeStamp * /*inNow*/,
880 								const AudioBufferList *inInputData,
881 								const AudioTimeStamp * /*inInputTime*/,
882 								AudioBufferList *outOutputData,
883 								const AudioTimeStamp * /*inOutputTime*/,
884 								void *infoPointer)
885 {
886 	CallbackInfo *info = (CallbackInfo *)infoPointer;
887 
888 	RtApiCore *object = (RtApiCore *)info->object;
889 	if (object->callbackEvent(inDevice, inInputData, outOutputData) == false)
890 		return kAudioHardwareUnspecifiedError;
891 	else
892 		return kAudioHardwareNoError;
893 }
894 
xrunListener(AudioObjectID,UInt32 nAddresses,const AudioObjectPropertyAddress properties[],void * handlePointer)895 static OSStatus xrunListener(AudioObjectID /*inDevice*/,
896 							 UInt32 nAddresses,
897 							 const AudioObjectPropertyAddress properties[],
898 							 void *handlePointer)
899 {
900 	CoreHandle *handle = (CoreHandle *)handlePointer;
901 	for (UInt32 i = 0; i < nAddresses; i++)
902 	{
903 		if (properties[i].mSelector == kAudioDeviceProcessorOverload)
904 		{
905 			if (properties[i].mScope == kAudioDevicePropertyScopeInput)
906 				handle->xrun[1] = true;
907 			else
908 				handle->xrun[0] = true;
909 		}
910 	}
911 
912 	return kAudioHardwareNoError;
913 }
914 
rateListener(AudioObjectID inDevice,UInt32,const AudioObjectPropertyAddress[],void * ratePointer)915 static OSStatus rateListener(AudioObjectID inDevice,
916 							 UInt32 /*nAddresses*/,
917 							 const AudioObjectPropertyAddress /*properties*/[],
918 							 void *ratePointer)
919 {
920 	Float64 *rate = (Float64 *)ratePointer;
921 	UInt32 dataSize = sizeof(Float64);
922 	AudioObjectPropertyAddress property = {kAudioDevicePropertyNominalSampleRate,
923 										   kAudioObjectPropertyScopeGlobal,
924 										   kAudioObjectPropertyElementMaster};
925 	AudioObjectGetPropertyData(inDevice, &property, 0, NULL, &dataSize, rate);
926 	return kAudioHardwareNoError;
927 }
928 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)929 bool RtApiCore ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
930 								 unsigned int firstChannel, unsigned int sampleRate,
931 								 RtAudioFormat format, unsigned int *bufferSize,
932 								 RtAudio::StreamOptions *options)
933 {
934 	// Get device ID
935 	unsigned int nDevices = getDeviceCount();
936 	if (nDevices == 0)
937 	{
938 		// This should not happen because a check is made before this function is called.
939 		errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
940 		return FAILURE;
941 	}
942 
943 	if (device >= nDevices)
944 	{
945 		// This should not happen because a check is made before this function is called.
946 		errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
947 		return FAILURE;
948 	}
949 
950 	AudioDeviceID deviceList[nDevices];
951 	UInt32 dataSize = sizeof(AudioDeviceID) * nDevices;
952 	AudioObjectPropertyAddress property = {kAudioHardwarePropertyDevices,
953 										   kAudioObjectPropertyScopeGlobal,
954 										   kAudioObjectPropertyElementMaster};
955 	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property,
956 												 0, NULL, &dataSize, (void *)&deviceList);
957 	if (result != noErr)
958 	{
959 		errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
960 		return FAILURE;
961 	}
962 
963 	AudioDeviceID id = deviceList[device];
964 
965 	// Setup for stream mode.
966 	bool isInput = false;
967 	if (mode == INPUT)
968 	{
969 		isInput = true;
970 		property.mScope = kAudioDevicePropertyScopeInput;
971 	}
972 	else
973 		property.mScope = kAudioDevicePropertyScopeOutput;
974 
975 	// Get the stream "configuration".
976 	AudioBufferList *bufferList = nil;
977 	dataSize = 0;
978 	property.mSelector = kAudioDevicePropertyStreamConfiguration;
979 	result = AudioObjectGetPropertyDataSize(id, &property, 0, NULL, &dataSize);
980 	if (result != noErr || dataSize == 0)
981 	{
982 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << device << ").";
983 		errorText_ = errorStream_.str();
984 		return FAILURE;
985 	}
986 
987 	// Allocate the AudioBufferList.
988 	bufferList = (AudioBufferList *)malloc(dataSize);
989 	if (bufferList == NULL)
990 	{
991 		errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
992 		return FAILURE;
993 	}
994 
995 	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, bufferList);
996 	if (result != noErr || dataSize == 0)
997 	{
998 		free(bufferList);
999 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting stream configuration for device (" << device << ").";
1000 		errorText_ = errorStream_.str();
1001 		return FAILURE;
1002 	}
1003 
1004 	// Search for one or more streams that contain the desired number of
1005 	// channels. CoreAudio devices can have an arbitrary number of
1006 	// streams and each stream can have an arbitrary number of channels.
1007 	// For each stream, a single buffer of interleaved samples is
1008 	// provided.  RtAudio prefers the use of one stream of interleaved
1009 	// data or multiple consecutive single-channel streams.  However, we
1010 	// now support multiple consecutive multi-channel streams of
1011 	// interleaved data as well.
1012 	UInt32 iStream, offsetCounter = firstChannel;
1013 	UInt32 nStreams = bufferList->mNumberBuffers;
1014 	bool monoMode = false;
1015 	bool foundStream = false;
1016 
1017 	// First check that the device supports the requested number of
1018 	// channels.
1019 	UInt32 deviceChannels = 0;
1020 	for (iStream = 0; iStream < nStreams; iStream++)
1021 		deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1022 
1023 	if (deviceChannels < (channels + firstChannel))
1024 	{
1025 		free(bufferList);
1026 		errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1027 		errorText_ = errorStream_.str();
1028 		return FAILURE;
1029 	}
1030 
1031 	// Look for a single stream meeting our needs.
1032 	UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1033 	for (iStream = 0; iStream < nStreams; iStream++)
1034 	{
1035 		streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1036 		if (streamChannels >= channels + offsetCounter)
1037 		{
1038 			firstStream = iStream;
1039 			channelOffset = offsetCounter;
1040 			foundStream = true;
1041 			break;
1042 		}
1043 		if (streamChannels > offsetCounter) break;
1044 		offsetCounter -= streamChannels;
1045 	}
1046 
1047 	// If we didn't find a single stream above, then we should be able
1048 	// to meet the channel specification with multiple streams.
1049 	if (foundStream == false)
1050 	{
1051 		monoMode = true;
1052 		offsetCounter = firstChannel;
1053 		for (iStream = 0; iStream < nStreams; iStream++)
1054 		{
1055 			streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 			if (streamChannels > offsetCounter) break;
1057 			offsetCounter -= streamChannels;
1058 		}
1059 
1060 		firstStream = iStream;
1061 		channelOffset = offsetCounter;
1062 		Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 
1064 		if (streamChannels > 1) monoMode = false;
1065 		while (channelCounter > 0)
1066 		{
1067 			streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1068 			if (streamChannels > 1) monoMode = false;
1069 			channelCounter -= streamChannels;
1070 			streamCount++;
1071 		}
1072 	}
1073 
1074 	free(bufferList);
1075 
1076 	// Determine the buffer size.
1077 	AudioValueRange bufferRange;
1078 	dataSize = sizeof(AudioValueRange);
1079 	property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1080 	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &bufferRange);
1081 
1082 	if (result != noErr)
1083 	{
1084 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting buffer size range for device (" << device << ").";
1085 		errorText_ = errorStream_.str();
1086 		return FAILURE;
1087 	}
1088 
1089 	if (bufferRange.mMinimum > *bufferSize)
1090 		*bufferSize = (unsigned long)bufferRange.mMinimum;
1091 	else if (bufferRange.mMaximum < *bufferSize)
1092 		*bufferSize = (unsigned long)bufferRange.mMaximum;
1093 	if (options && options->flags & RTAUDIO_MINIMIZE_LATENCY) *bufferSize = (unsigned long)bufferRange.mMinimum;
1094 
1095 	// Set the buffer size.  For multiple streams, I'm assuming we only
1096 	// need to make this setting for the master channel.
1097 	UInt32 theSize = (UInt32)*bufferSize;
1098 	dataSize = sizeof(UInt32);
1099 	property.mSelector = kAudioDevicePropertyBufferFrameSize;
1100 	result = AudioObjectSetPropertyData(id, &property, 0, NULL, dataSize, &theSize);
1101 
1102 	if (result != noErr)
1103 	{
1104 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting the buffer size for device (" << device << ").";
1105 		errorText_ = errorStream_.str();
1106 		return FAILURE;
1107 	}
1108 
1109 	// If attempting to setup a duplex stream, the bufferSize parameter
1110 	// MUST be the same in both directions!
1111 	*bufferSize = theSize;
1112 	if (stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize)
1113 	{
1114 		errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1115 		errorText_ = errorStream_.str();
1116 		return FAILURE;
1117 	}
1118 
1119 	stream_.bufferSize = *bufferSize;
1120 	stream_.nBuffers = 1;
1121 
1122 	// Try to set "hog" mode ... it's not clear to me this is working.
1123 	if (options && options->flags & RTAUDIO_HOG_DEVICE)
1124 	{
1125 		pid_t hog_pid;
1126 		dataSize = sizeof(hog_pid);
1127 		property.mSelector = kAudioDevicePropertyHogMode;
1128 		result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &hog_pid);
1129 		if (result != noErr)
1130 		{
1131 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting 'hog' state!";
1132 			errorText_ = errorStream_.str();
1133 			return FAILURE;
1134 		}
1135 
1136 		if (hog_pid != getpid())
1137 		{
1138 			hog_pid = getpid();
1139 			result = AudioObjectSetPropertyData(id, &property, 0, NULL, dataSize, &hog_pid);
1140 			if (result != noErr)
1141 			{
1142 				errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting 'hog' state!";
1143 				errorText_ = errorStream_.str();
1144 				return FAILURE;
1145 			}
1146 		}
1147 	}
1148 
1149 	// Check and if necessary, change the sample rate for the device.
1150 	Float64 nominalRate;
1151 	dataSize = sizeof(Float64);
1152 	property.mSelector = kAudioDevicePropertyNominalSampleRate;
1153 	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &nominalRate);
1154 	if (result != noErr)
1155 	{
1156 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting current sample rate.";
1157 		errorText_ = errorStream_.str();
1158 		return FAILURE;
1159 	}
1160 
1161 	// Only change the sample rate if off by more than 1 Hz.
1162 	if (fabs(nominalRate - (double)sampleRate) > 1.0)
1163 	{
1164 		// Set a property listener for the sample rate change
1165 		Float64 reportedRate = 0.0;
1166 		AudioObjectPropertyAddress tmp = {kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
1167 		result = AudioObjectAddPropertyListener(id, &tmp, rateListener, (void *)&reportedRate);
1168 		if (result != noErr)
1169 		{
1170 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting sample rate property listener for device (" << device << ").";
1171 			errorText_ = errorStream_.str();
1172 			return FAILURE;
1173 		}
1174 
1175 		nominalRate = (Float64)sampleRate;
1176 		result = AudioObjectSetPropertyData(id, &property, 0, NULL, dataSize, &nominalRate);
1177 		if (result != noErr)
1178 		{
1179 			AudioObjectRemovePropertyListener(id, &tmp, rateListener, (void *)&reportedRate);
1180 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting sample rate for device (" << device << ").";
1181 			errorText_ = errorStream_.str();
1182 			return FAILURE;
1183 		}
1184 
1185 		// Now wait until the reported nominal rate is what we just set.
1186 		UInt32 microCounter = 0;
1187 		while (reportedRate != nominalRate)
1188 		{
1189 			microCounter += 5000;
1190 			if (microCounter > 5000000) break;
1191 			usleep(5000);
1192 		}
1193 
1194 		// Remove the property listener.
1195 		AudioObjectRemovePropertyListener(id, &tmp, rateListener, (void *)&reportedRate);
1196 
1197 		if (microCounter > 5000000)
1198 		{
1199 			errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1200 			errorText_ = errorStream_.str();
1201 			return FAILURE;
1202 		}
1203 	}
1204 
1205 	// Now set the stream format for all streams.  Also, check the
1206 	// physical format of the device and change that if necessary.
1207 	AudioStreamBasicDescription description;
1208 	dataSize = sizeof(AudioStreamBasicDescription);
1209 	property.mSelector = kAudioStreamPropertyVirtualFormat;
1210 	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &description);
1211 	if (result != noErr)
1212 	{
1213 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting stream format for device (" << device << ").";
1214 		errorText_ = errorStream_.str();
1215 		return FAILURE;
1216 	}
1217 
1218 	// Set the sample rate and data format id.  However, only make the
1219 	// change if the sample rate is not within 1.0 of the desired
1220 	// rate and the format is not linear pcm.
1221 	bool updateFormat = false;
1222 	if (fabs(description.mSampleRate - (Float64)sampleRate) > 1.0)
1223 	{
1224 		description.mSampleRate = (Float64)sampleRate;
1225 		updateFormat = true;
1226 	}
1227 
1228 	if (description.mFormatID != kAudioFormatLinearPCM)
1229 	{
1230 		description.mFormatID = kAudioFormatLinearPCM;
1231 		updateFormat = true;
1232 	}
1233 
1234 	if (updateFormat)
1235 	{
1236 		result = AudioObjectSetPropertyData(id, &property, 0, NULL, dataSize, &description);
1237 		if (result != noErr)
1238 		{
1239 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << device << ").";
1240 			errorText_ = errorStream_.str();
1241 			return FAILURE;
1242 		}
1243 	}
1244 
1245 	// Now check the physical format.
1246 	property.mSelector = kAudioStreamPropertyPhysicalFormat;
1247 	result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &description);
1248 	if (result != noErr)
1249 	{
1250 		errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting stream physical format for device (" << device << ").";
1251 		errorText_ = errorStream_.str();
1252 		return FAILURE;
1253 	}
1254 
1255 	//std::cout << "Current physical stream format:" << std::endl;
1256 	//std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1257 	//std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1258 	//std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1259 	//std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1260 
1261 	if (description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16)
1262 	{
1263 		description.mFormatID = kAudioFormatLinearPCM;
1264 		//description.mSampleRate = (Float64) sampleRate;
1265 		AudioStreamBasicDescription testDescription = description;
1266 		UInt32 formatFlags;
1267 
1268 		// We'll try higher bit rates first and then work our way down.
1269 		std::vector<std::pair<UInt32, UInt32> > physicalFormats;
1270 		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1271 		physicalFormats.push_back(std::pair<Float32, UInt32>(32, formatFlags));
1272 		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1273 		physicalFormats.push_back(std::pair<Float32, UInt32>(32, formatFlags));
1274 		physicalFormats.push_back(std::pair<Float32, UInt32>(24, formatFlags));  // 24-bit packed
1275 		formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh);
1276 		physicalFormats.push_back(std::pair<Float32, UInt32>(24.2, formatFlags));  // 24-bit in 4 bytes, aligned low
1277 		formatFlags |= kAudioFormatFlagIsAlignedHigh;
1278 		physicalFormats.push_back(std::pair<Float32, UInt32>(24.4, formatFlags));  // 24-bit in 4 bytes, aligned high
1279 		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1280 		physicalFormats.push_back(std::pair<Float32, UInt32>(16, formatFlags));
1281 		physicalFormats.push_back(std::pair<Float32, UInt32>(8, formatFlags));
1282 
1283 		bool setPhysicalFormat = false;
1284 		for (unsigned int i = 0; i < physicalFormats.size(); i++)
1285 		{
1286 			testDescription = description;
1287 			testDescription.mBitsPerChannel = (UInt32)physicalFormats[i].first;
1288 			testDescription.mFormatFlags = physicalFormats[i].second;
1289 			if ((24 == (UInt32)physicalFormats[i].first) && ~(physicalFormats[i].second & kAudioFormatFlagIsPacked))
1290 				testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1291 			else
1292 				testDescription.mBytesPerFrame = testDescription.mBitsPerChannel / 8 * testDescription.mChannelsPerFrame;
1293 			testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1294 			result = AudioObjectSetPropertyData(id, &property, 0, NULL, dataSize, &testDescription);
1295 			if (result == noErr)
1296 			{
1297 				setPhysicalFormat = true;
1298 				//std::cout << "Updated physical stream format:" << std::endl;
1299 				//std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1300 				//std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1301 				//std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1302 				//std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1303 				break;
1304 			}
1305 		}
1306 
1307 		if (!setPhysicalFormat)
1308 		{
1309 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") setting physical data format for device (" << device << ").";
1310 			errorText_ = errorStream_.str();
1311 			return FAILURE;
1312 		}
1313 	}  // done setting virtual/physical formats.
1314 
1315 	// Get the stream / device latency.
1316 	UInt32 latency;
1317 	dataSize = sizeof(UInt32);
1318 	property.mSelector = kAudioDevicePropertyLatency;
1319 	if (AudioObjectHasProperty(id, &property) == true)
1320 	{
1321 		result = AudioObjectGetPropertyData(id, &property, 0, NULL, &dataSize, &latency);
1322 		if (result == kAudioHardwareNoError)
1323 			stream_.latency[mode] = latency;
1324 		else
1325 		{
1326 			errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode(result) << ") getting device latency for device (" << device << ").";
1327 			errorText_ = errorStream_.str();
1328 			error(RtAudioError::WARNING);
1329 		}
1330 	}
1331 
1332 	// Byte-swapping: According to AudioHardware.h, the stream data will
1333 	// always be presented in native-endian format, so we should never
1334 	// need to byte swap.
1335 	stream_.doByteSwap[mode] = false;
1336 
1337 	// From the CoreAudio documentation, PCM data must be supplied as
1338 	// 32-bit floats.
1339 	stream_.userFormat = format;
1340 	stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1341 
1342 	if (streamCount == 1)
1343 		stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1344 	else  // multiple streams
1345 		stream_.nDeviceChannels[mode] = channels;
1346 	stream_.nUserChannels[mode] = channels;
1347 	stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1348 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
1349 		stream_.userInterleaved = false;
1350 	else
1351 		stream_.userInterleaved = true;
1352 	stream_.deviceInterleaved[mode] = true;
1353 	if (monoMode == true) stream_.deviceInterleaved[mode] = false;
1354 
1355 	// Set flags for buffer conversion.
1356 	stream_.doConvertBuffer[mode] = false;
1357 	if (stream_.userFormat != stream_.deviceFormat[mode])
1358 		stream_.doConvertBuffer[mode] = true;
1359 	if (stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode])
1360 		stream_.doConvertBuffer[mode] = true;
1361 	if (streamCount == 1)
1362 	{
1363 		if (stream_.nUserChannels[mode] > 1 &&
1364 			stream_.userInterleaved != stream_.deviceInterleaved[mode])
1365 			stream_.doConvertBuffer[mode] = true;
1366 	}
1367 	else if (monoMode && stream_.userInterleaved)
1368 		stream_.doConvertBuffer[mode] = true;
1369 
1370 	// Allocate our CoreHandle structure for the stream.
1371 	CoreHandle *handle = 0;
1372 	if (stream_.apiHandle == 0)
1373 	{
1374 		try
1375 		{
1376 			handle = new CoreHandle;
1377 		}
1378 		catch (std::bad_alloc &)
1379 		{
1380 			errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1381 			goto error;
1382 		}
1383 
1384 		if (pthread_cond_init(&handle->condition, NULL))
1385 		{
1386 			errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1387 			goto error;
1388 		}
1389 		stream_.apiHandle = (void *)handle;
1390 	}
1391 	else
1392 		handle = (CoreHandle *)stream_.apiHandle;
1393 	handle->iStream[mode] = firstStream;
1394 	handle->nStreams[mode] = streamCount;
1395 	handle->id[mode] = id;
1396 
1397 	// Allocate necessary internal buffers.
1398 	unsigned long bufferBytes;
1399 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
1400 	//  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1401 	stream_.userBuffer[mode] = (char *)malloc(bufferBytes * sizeof(char));
1402 	memset(stream_.userBuffer[mode], 0, bufferBytes * sizeof(char));
1403 	if (stream_.userBuffer[mode] == NULL)
1404 	{
1405 		errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1406 		goto error;
1407 	}
1408 
1409 	// If possible, we will make use of the CoreAudio stream buffers as
1410 	// "device buffers".  However, we can't do this if using multiple
1411 	// streams.
1412 	if (stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1)
1413 	{
1414 		bool makeBuffer = true;
1415 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
1416 		if (mode == INPUT)
1417 		{
1418 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
1419 			{
1420 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
1421 				if (bufferBytes <= bytesOut) makeBuffer = false;
1422 			}
1423 		}
1424 
1425 		if (makeBuffer)
1426 		{
1427 			bufferBytes *= *bufferSize;
1428 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
1429 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
1430 			if (stream_.deviceBuffer == NULL)
1431 			{
1432 				errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1433 				goto error;
1434 			}
1435 		}
1436 	}
1437 
1438 	stream_.sampleRate = sampleRate;
1439 	stream_.device[mode] = device;
1440 	stream_.state = STREAM_STOPPED;
1441 	stream_.callbackInfo.object = (void *)this;
1442 
1443 	// Setup the buffer conversion information structure.
1444 	if (stream_.doConvertBuffer[mode])
1445 	{
1446 		if (streamCount > 1)
1447 			setConvertInfo(mode, 0);
1448 		else
1449 			setConvertInfo(mode, channelOffset);
1450 	}
1451 
1452 	if (mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device)
1453 		// Only one callback procedure per device.
1454 		stream_.mode = DUPLEX;
1455 	else
1456 	{
1457 #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
1458 		result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *)&stream_.callbackInfo, &handle->procId[mode]);
1459 #else
1460 		// deprecated in favor of AudioDeviceCreateIOProcID()
1461 		result = AudioDeviceAddIOProc(id, callbackHandler, (void *)&stream_.callbackInfo);
1462 #endif
1463 		if (result != noErr)
1464 		{
1465 			errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1466 			errorText_ = errorStream_.str();
1467 			goto error;
1468 		}
1469 		if (stream_.mode == OUTPUT && mode == INPUT)
1470 			stream_.mode = DUPLEX;
1471 		else
1472 			stream_.mode = mode;
1473 	}
1474 
1475 	// Setup the device property listener for over/underload.
1476 	property.mSelector = kAudioDeviceProcessorOverload;
1477 	property.mScope = kAudioObjectPropertyScopeGlobal;
1478 	result = AudioObjectAddPropertyListener(id, &property, xrunListener, (void *)handle);
1479 
1480 	return SUCCESS;
1481 
1482 error:
1483 	if (handle)
1484 	{
1485 		pthread_cond_destroy(&handle->condition);
1486 		delete handle;
1487 		stream_.apiHandle = 0;
1488 	}
1489 
1490 	for (int i = 0; i < 2; i++)
1491 	{
1492 		if (stream_.userBuffer[i])
1493 		{
1494 			free(stream_.userBuffer[i]);
1495 			stream_.userBuffer[i] = 0;
1496 		}
1497 	}
1498 
1499 	if (stream_.deviceBuffer)
1500 	{
1501 		free(stream_.deviceBuffer);
1502 		stream_.deviceBuffer = 0;
1503 	}
1504 
1505 	stream_.state = STREAM_CLOSED;
1506 	return FAILURE;
1507 }
1508 
closeStream(void)1509 void RtApiCore ::closeStream(void)
1510 {
1511 	if (stream_.state == STREAM_CLOSED)
1512 	{
1513 		errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1514 		error(RtAudioError::WARNING);
1515 		return;
1516 	}
1517 
1518 	CoreHandle *handle = (CoreHandle *)stream_.apiHandle;
1519 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
1520 	{
1521 		if (handle)
1522 		{
1523 			AudioObjectPropertyAddress property = {kAudioHardwarePropertyDevices,
1524 												   kAudioObjectPropertyScopeGlobal,
1525 												   kAudioObjectPropertyElementMaster};
1526 
1527 			property.mSelector = kAudioDeviceProcessorOverload;
1528 			property.mScope = kAudioObjectPropertyScopeGlobal;
1529 			if (AudioObjectRemovePropertyListener(handle->id[0], &property, xrunListener, (void *)handle) != noErr)
1530 			{
1531 				errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1532 				error(RtAudioError::WARNING);
1533 			}
1534 		}
1535 		if (stream_.state == STREAM_RUNNING)
1536 			AudioDeviceStop(handle->id[0], callbackHandler);
1537 #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
1538 		AudioDeviceDestroyIOProcID(handle->id[0], handle->procId[0]);
1539 #else
1540 		// deprecated in favor of AudioDeviceDestroyIOProcID()
1541 		AudioDeviceRemoveIOProc(handle->id[0], callbackHandler);
1542 #endif
1543 	}
1544 
1545 	if (stream_.mode == INPUT || (stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1]))
1546 	{
1547 		if (handle)
1548 		{
1549 			AudioObjectPropertyAddress property = {kAudioHardwarePropertyDevices,
1550 												   kAudioObjectPropertyScopeGlobal,
1551 												   kAudioObjectPropertyElementMaster};
1552 
1553 			property.mSelector = kAudioDeviceProcessorOverload;
1554 			property.mScope = kAudioObjectPropertyScopeGlobal;
1555 			if (AudioObjectRemovePropertyListener(handle->id[1], &property, xrunListener, (void *)handle) != noErr)
1556 			{
1557 				errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1558 				error(RtAudioError::WARNING);
1559 			}
1560 		}
1561 		if (stream_.state == STREAM_RUNNING)
1562 			AudioDeviceStop(handle->id[1], callbackHandler);
1563 #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
1564 		AudioDeviceDestroyIOProcID(handle->id[1], handle->procId[1]);
1565 #else
1566 		// deprecated in favor of AudioDeviceDestroyIOProcID()
1567 		AudioDeviceRemoveIOProc(handle->id[1], callbackHandler);
1568 #endif
1569 	}
1570 
1571 	for (int i = 0; i < 2; i++)
1572 	{
1573 		if (stream_.userBuffer[i])
1574 		{
1575 			free(stream_.userBuffer[i]);
1576 			stream_.userBuffer[i] = 0;
1577 		}
1578 	}
1579 
1580 	if (stream_.deviceBuffer)
1581 	{
1582 		free(stream_.deviceBuffer);
1583 		stream_.deviceBuffer = 0;
1584 	}
1585 
1586 	// Destroy pthread condition variable.
1587 	pthread_cond_destroy(&handle->condition);
1588 	delete handle;
1589 	stream_.apiHandle = 0;
1590 
1591 	stream_.mode = UNINITIALIZED;
1592 	stream_.state = STREAM_CLOSED;
1593 }
1594 
startStream(void)1595 void RtApiCore ::startStream(void)
1596 {
1597 	verifyStream();
1598 	if (stream_.state == STREAM_RUNNING)
1599 	{
1600 		errorText_ = "RtApiCore::startStream(): the stream is already running!";
1601 		error(RtAudioError::WARNING);
1602 		return;
1603 	}
1604 
1605 	OSStatus result = noErr;
1606 	CoreHandle *handle = (CoreHandle *)stream_.apiHandle;
1607 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
1608 	{
1609 		result = AudioDeviceStart(handle->id[0], callbackHandler);
1610 		if (result != noErr)
1611 		{
1612 			errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode(result) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1613 			errorText_ = errorStream_.str();
1614 			goto unlock;
1615 		}
1616 	}
1617 
1618 	if (stream_.mode == INPUT ||
1619 		(stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1]))
1620 	{
1621 		result = AudioDeviceStart(handle->id[1], callbackHandler);
1622 		if (result != noErr)
1623 		{
1624 			errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1625 			errorText_ = errorStream_.str();
1626 			goto unlock;
1627 		}
1628 	}
1629 
1630 	handle->drainCounter = 0;
1631 	handle->internalDrain = false;
1632 	stream_.state = STREAM_RUNNING;
1633 
1634 unlock:
1635 	if (result == noErr) return;
1636 	error(RtAudioError::SYSTEM_ERROR);
1637 }
1638 
// Stop the running (or draining) stream.  For an output/duplex stream the
// function first lets the callback drain queued audio: drainCounter is set
// to 2 and the thread blocks on the handle's condition variable until the
// callback signals that draining is finished.  Then the IOProc is stopped
// on the output device and, for INPUT or split-device DUPLEX, on the input
// device as well.  Any CoreAudio failure raises a SYSTEM_ERROR.
void RtApiCore ::stopStream(void)
{
	verifyStream();
	if (stream_.state == STREAM_STOPPED)
	{
		errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
		error(RtAudioError::WARNING);
		return;
	}

	OSStatus result = noErr;
	CoreHandle *handle = (CoreHandle *)stream_.apiHandle;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// drainCounter == 0 means a normal stop (not an abort): ask the
		// callback to drain, then wait for its signal.
		// NOTE(review): stream_.mutex does not appear to be locked here
		// before pthread_cond_wait(), which POSIX requires — confirm the
		// locking protocol with the callback/abort paths.
		if (handle->drainCounter == 0)
		{
			handle->drainCounter = 2;
			pthread_cond_wait(&handle->condition, &stream_.mutex);  // block until signaled
		}

		result = AudioDeviceStop(handle->id[0], callbackHandler);
		if (result != noErr)
		{
			errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode(result) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
			errorText_ = errorStream_.str();
			goto unlock;
		}
	}

	// Stop the input device separately only when it is distinct from the
	// output device (single-device duplex uses one IOProc).
	if (stream_.mode == INPUT || (stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1]))
	{
		result = AudioDeviceStop(handle->id[1], callbackHandler);
		if (result != noErr)
		{
			errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode(result) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
			errorText_ = errorStream_.str();
			goto unlock;
		}
	}

	stream_.state = STREAM_STOPPED;

unlock:
	if (result == noErr) return;
	error(RtAudioError::SYSTEM_ERROR);
}
1685 
abortStream(void)1686 void RtApiCore ::abortStream(void)
1687 {
1688 	verifyStream();
1689 	if (stream_.state == STREAM_STOPPED)
1690 	{
1691 		errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1692 		error(RtAudioError::WARNING);
1693 		return;
1694 	}
1695 
1696 	CoreHandle *handle = (CoreHandle *)stream_.apiHandle;
1697 	handle->drainCounter = 2;
1698 
1699 	stopStream();
1700 }
1701 
1702 // This function will be called by a spawned thread when the user
1703 // callback function signals that the stream should be stopped or
1704 // aborted.  It is better to handle it this way because the
1705 // callbackEvent() function probably should return before the AudioDeviceStop()
1706 // function is called.
coreStopStream(void * ptr)1707 static void *coreStopStream(void *ptr)
1708 {
1709 	CallbackInfo *info = (CallbackInfo *)ptr;
1710 	RtApiCore *object = (RtApiCore *)info->object;
1711 
1712 	object->stopStream();
1713 	pthread_exit(NULL);
1714 }
1715 
// Per-device I/O callback dispatched by CoreAudio for the output and/or
// input device of this stream.  It runs the user callback, manages the
// drain/stop handshake, and copies or converts audio between the
// CoreAudio buffer lists and the user/device buffers, handling single
// and multiple hardware streams in interleaved and non-interleaved forms.
// Returns SUCCESS normally; FAILURE only if called on a closed stream.
bool RtApiCore ::callbackEvent(AudioDeviceID deviceId,
							   const AudioBufferList *inBufferList,
							   const AudioBufferList *outBufferList)
{
	if (stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING) return SUCCESS;
	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(RtAudioError::WARNING);
		return FAILURE;
	}

	CallbackInfo *info = (CallbackInfo *)&stream_.callbackInfo;
	CoreHandle *handle = (CoreHandle *)stream_.apiHandle;

	// Check if we were draining the stream and signal is finished.
	if (handle->drainCounter > 3)
	{
		ThreadHandle threadId;

		stream_.state = STREAM_STOPPING;
		if (handle->internalDrain == true)
			// Drain was requested from inside the callback: stop the stream
			// on a spawned thread so this callback can return first.
			pthread_create(&threadId, NULL, coreStopStream, info);
		else  // external call to stopStream()
			pthread_cond_signal(&handle->condition);
		return SUCCESS;
	}

	AudioDeviceID outputDevice = handle->id[0];

	// Invoke user callback to get fresh output data UNLESS we are
	// draining stream or duplex mode AND the input/output devices are
	// different AND this function is called for the input device.
	if (handle->drainCounter == 0 && (stream_.mode != DUPLEX || deviceId == outputDevice))
	{
		RtAudioCallback callback = (RtAudioCallback)info->callback;
		double streamTime = getStreamTime();
		RtAudioStreamStatus status = 0;
		// Report (and clear) any xrun flags set by the device listeners
		// since the previous tick.
		if (stream_.mode != INPUT && handle->xrun[0] == true)
		{
			status |= RTAUDIO_OUTPUT_UNDERFLOW;
			handle->xrun[0] = false;
		}
		if (stream_.mode != OUTPUT && handle->xrun[1] == true)
		{
			status |= RTAUDIO_INPUT_OVERFLOW;
			handle->xrun[1] = false;
		}

		int cbReturnValue = callback(stream_.userBuffer[0], stream_.userBuffer[1],
									 stream_.bufferSize, streamTime, status, info->userData);
		// Callback return value 2 => abort immediately; 1 => drain the
		// remaining output, then stop.
		if (cbReturnValue == 2)
		{
			stream_.state = STREAM_STOPPING;
			handle->drainCounter = 2;
			abortStream();
			return SUCCESS;
		}
		else if (cbReturnValue == 1)
		{
			handle->drainCounter = 1;
			handle->internalDrain = true;
		}
	}

	if (stream_.mode == OUTPUT || (stream_.mode == DUPLEX && deviceId == outputDevice))
	{
		if (handle->drainCounter > 1)
		{  // write zeros to the output stream

			if (handle->nStreams[0] == 1)
			{
				memset(outBufferList->mBuffers[handle->iStream[0]].mData,
					   0,
					   outBufferList->mBuffers[handle->iStream[0]].mDataByteSize);
			}
			else
			{  // fill multiple streams with zeros
				for (unsigned int i = 0; i < handle->nStreams[0]; i++)
				{
					memset(outBufferList->mBuffers[handle->iStream[0] + i].mData,
						   0,
						   outBufferList->mBuffers[handle->iStream[0] + i].mDataByteSize);
				}
			}
		}
		else if (handle->nStreams[0] == 1)
		{
			if (stream_.doConvertBuffer[0])
			{  // convert directly to CoreAudio stream buffer
				convertBuffer((char *)outBufferList->mBuffers[handle->iStream[0]].mData,
							  stream_.userBuffer[0], stream_.convertInfo[0]);
			}
			else
			{  // copy from user buffer
				memcpy(outBufferList->mBuffers[handle->iStream[0]].mData,
					   stream_.userBuffer[0],
					   outBufferList->mBuffers[handle->iStream[0]].mDataByteSize);
			}
		}
		else
		{  // fill multiple streams
			Float32 *inBuffer = (Float32 *)stream_.userBuffer[0];
			if (stream_.doConvertBuffer[0])
			{
				convertBuffer(stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0]);
				inBuffer = (Float32 *)stream_.deviceBuffer;
			}

			if (stream_.deviceInterleaved[0] == false)
			{  // mono mode
				UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
				for (unsigned int i = 0; i < stream_.nUserChannels[0]; i++)
				{
					memcpy(outBufferList->mBuffers[handle->iStream[0] + i].mData,
						   (void *)&inBuffer[i * stream_.bufferSize], bufferBytes);
				}
			}
			else
			{  // fill multiple multi-channel streams with interleaved data
				UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
				Float32 *out, *in;

				bool inInterleaved = (stream_.userInterleaved) ? true : false;
				UInt32 inChannels = stream_.nUserChannels[0];
				if (stream_.doConvertBuffer[0])
				{
					inInterleaved = true;  // device buffer will always be interleaved for nStreams > 1 and not mono mode
					inChannels = stream_.nDeviceChannels[0];
				}

				// inOffset is the stride between consecutive samples of one
				// channel in the source buffer.
				if (inInterleaved)
					inOffset = 1;
				else
					inOffset = stream_.bufferSize;

				channelsLeft = inChannels;
				for (unsigned int i = 0; i < handle->nStreams[0]; i++)
				{
					in = inBuffer;
					out = (Float32 *)outBufferList->mBuffers[handle->iStream[0] + i].mData;
					streamChannels = outBufferList->mBuffers[handle->iStream[0] + i].mNumberChannels;

					outJump = 0;
					// Account for possible channel offset in first stream
					if (i == 0 && stream_.channelOffset[0] > 0)
					{
						streamChannels -= stream_.channelOffset[0];
						outJump = stream_.channelOffset[0];
						out += outJump;
					}

					// Account for possible unfilled channels at end of the last stream
					if (streamChannels > channelsLeft)
					{
						outJump = streamChannels - channelsLeft;
						streamChannels = channelsLeft;
					}

					// Determine input buffer offsets and skips
					if (inInterleaved)
					{
						inJump = inChannels;
						in += inChannels - channelsLeft;
					}
					else
					{
						inJump = 1;
						in += (inChannels - channelsLeft) * inOffset;
					}

					// Note: this inner 'i' intentionally shadows the stream
					// index above; it iterates over sample frames.
					for (unsigned int i = 0; i < stream_.bufferSize; i++)
					{
						for (unsigned int j = 0; j < streamChannels; j++)
						{
							*out++ = in[j * inOffset];
						}
						out += outJump;
						in += inJump;
					}
					channelsLeft -= streamChannels;
				}
			}
		}
	}

	// Don't bother draining input
	if (handle->drainCounter)
	{
		handle->drainCounter++;
		goto unlock;
	}

	// Declared (not initialized) before assignment so the goto above does
	// not jump over an initialization.
	AudioDeviceID inputDevice;
	inputDevice = handle->id[1];
	if (stream_.mode == INPUT || (stream_.mode == DUPLEX && deviceId == inputDevice))
	{
		if (handle->nStreams[1] == 1)
		{
			if (stream_.doConvertBuffer[1])
			{  // convert directly from CoreAudio stream buffer
				convertBuffer(stream_.userBuffer[1],
							  (char *)inBufferList->mBuffers[handle->iStream[1]].mData,
							  stream_.convertInfo[1]);
			}
			else
			{  // copy to user buffer
				memcpy(stream_.userBuffer[1],
					   inBufferList->mBuffers[handle->iStream[1]].mData,
					   inBufferList->mBuffers[handle->iStream[1]].mDataByteSize);
			}
		}
		else
		{  // read from multiple streams
			Float32 *outBuffer = (Float32 *)stream_.userBuffer[1];
			if (stream_.doConvertBuffer[1]) outBuffer = (Float32 *)stream_.deviceBuffer;

			if (stream_.deviceInterleaved[1] == false)
			{  // mono mode
				UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
				for (unsigned int i = 0; i < stream_.nUserChannels[1]; i++)
				{
					memcpy((void *)&outBuffer[i * stream_.bufferSize],
						   inBufferList->mBuffers[handle->iStream[1] + i].mData, bufferBytes);
				}
			}
			else
			{  // read from multiple multi-channel streams
				UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
				Float32 *out, *in;

				bool outInterleaved = (stream_.userInterleaved) ? true : false;
				UInt32 outChannels = stream_.nUserChannels[1];
				if (stream_.doConvertBuffer[1])
				{
					outInterleaved = true;  // device buffer will always be interleaved for nStreams > 1 and not mono mode
					outChannels = stream_.nDeviceChannels[1];
				}

				// outOffset is the stride between consecutive samples of one
				// channel in the destination buffer.
				if (outInterleaved)
					outOffset = 1;
				else
					outOffset = stream_.bufferSize;

				channelsLeft = outChannels;
				for (unsigned int i = 0; i < handle->nStreams[1]; i++)
				{
					out = outBuffer;
					in = (Float32 *)inBufferList->mBuffers[handle->iStream[1] + i].mData;
					streamChannels = inBufferList->mBuffers[handle->iStream[1] + i].mNumberChannels;

					inJump = 0;
					// Account for possible channel offset in first stream
					if (i == 0 && stream_.channelOffset[1] > 0)
					{
						streamChannels -= stream_.channelOffset[1];
						inJump = stream_.channelOffset[1];
						in += inJump;
					}

					// Account for possible unread channels at end of the last stream
					if (streamChannels > channelsLeft)
					{
						inJump = streamChannels - channelsLeft;
						streamChannels = channelsLeft;
					}

					// Determine output buffer offsets and skips
					if (outInterleaved)
					{
						outJump = outChannels;
						out += outChannels - channelsLeft;
					}
					else
					{
						outJump = 1;
						out += (outChannels - channelsLeft) * outOffset;
					}

					// Note: this inner 'i' intentionally shadows the stream
					// index above; it iterates over sample frames.
					for (unsigned int i = 0; i < stream_.bufferSize; i++)
					{
						for (unsigned int j = 0; j < streamChannels; j++)
						{
							out[j * outOffset] = *in++;
						}
						out += outJump;
						in += inJump;
					}
					channelsLeft -= streamChannels;
				}
			}

			if (stream_.doConvertBuffer[1])
			{  // convert from our internal "device" buffer
				convertBuffer(stream_.userBuffer[1],
							  stream_.deviceBuffer,
							  stream_.convertInfo[1]);
			}
		}
	}

unlock:
	//MUTEX_UNLOCK( &stream_.mutex );

	RtApi::tickStreamTime();
	return SUCCESS;
}
2023 
getErrorCode(OSStatus code)2024 const char *RtApiCore ::getErrorCode(OSStatus code)
2025 {
2026 	switch (code)
2027 	{
2028 		case kAudioHardwareNotRunningError:
2029 			return "kAudioHardwareNotRunningError";
2030 
2031 		case kAudioHardwareUnspecifiedError:
2032 			return "kAudioHardwareUnspecifiedError";
2033 
2034 		case kAudioHardwareUnknownPropertyError:
2035 			return "kAudioHardwareUnknownPropertyError";
2036 
2037 		case kAudioHardwareBadPropertySizeError:
2038 			return "kAudioHardwareBadPropertySizeError";
2039 
2040 		case kAudioHardwareIllegalOperationError:
2041 			return "kAudioHardwareIllegalOperationError";
2042 
2043 		case kAudioHardwareBadObjectError:
2044 			return "kAudioHardwareBadObjectError";
2045 
2046 		case kAudioHardwareBadDeviceError:
2047 			return "kAudioHardwareBadDeviceError";
2048 
2049 		case kAudioHardwareBadStreamError:
2050 			return "kAudioHardwareBadStreamError";
2051 
2052 		case kAudioHardwareUnsupportedOperationError:
2053 			return "kAudioHardwareUnsupportedOperationError";
2054 
2055 		case kAudioDeviceUnsupportedFormatError:
2056 			return "kAudioDeviceUnsupportedFormatError";
2057 
2058 		case kAudioDevicePermissionsError:
2059 			return "kAudioDevicePermissionsError";
2060 
2061 		default:
2062 			return "CoreAudio unknown error";
2063 	}
2064 }
2065 
2066 //******************** End of __MACOSX_CORE__ *********************//
2067 #endif
2068 
2069 #if defined(__UNIX_JACK__)
2070 
2071 // JACK is a low-latency audio server, originally written for the
2072 // GNU/Linux operating system and now also ported to OS-X. It can
2073 // connect a number of different applications to an audio device, as
2074 // well as allowing them to share audio between themselves.
2075 //
2076 // When using JACK with RtAudio, "devices" refer to JACK clients that
2077 // have ports connected to the server.  The JACK server is typically
2078 // started in a terminal as follows:
2079 //
2080 // .jackd -d alsa -d hw:0
2081 //
2082 // or through an interface program such as qjackctl.  Many of the
2083 // parameters normally set for a stream are fixed by the JACK server
2084 // and can be specified when the JACK server is started.  In
2085 // particular,
2086 //
2087 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2088 //
2089 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2090 // frames, and number of buffers = 4.  Once the server is running, it
2091 // is not possible to override these values.  If the values are not
2092 // specified in the command-line, the JACK server uses default values.
2093 //
2094 // The JACK server does not have to be running when an instance of
2095 // RtApiJack is created, though the function getDeviceCount() will
2096 // report 0 devices found until JACK has been started.  When no
2097 // devices are available (i.e., the JACK server is not running), a
2098 // stream cannot be opened.
2099 
2100 #include <jack/jack.h>
2101 #include <unistd.h>
2102 #include <cstdio>
2103 
2104 // A structure to hold various information related to the Jack API
2105 // implementation.
2106 struct JackHandle
2107 {
2108 	jack_client_t *client;
2109 	jack_port_t **ports[2];
2110 	std::string deviceName[2];
2111 	bool xrun[2];
2112 	pthread_cond_t condition;
2113 	int drainCounter;    // Tracks callback counts when draining
2114 	bool internalDrain;  // Indicates if stop is initiated from callback or not.
2115 
JackHandleJackHandle2116 	JackHandle()
2117 		: client(0), drainCounter(0), internalDrain(false)
2118 	{
2119 		ports[0] = 0;
2120 		ports[1] = 0;
2121 		xrun[0] = false;
2122 		xrun[1] = false;
2123 	}
2124 };
2125 
// No-op error handler installed to suppress JACK's internal error output.
static void jackSilentError(const char *) {}
2127 
// Constructor.  In non-debug builds, install the no-op jackSilentError
// handler so JACK does not write its own diagnostics to stderr.
RtApiJack ::RtApiJack()
{
	// Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
	// Turn off Jack's internal error reporting.
	jack_set_error_function(&jackSilentError);
#endif
}
2136 
~RtApiJack()2137 RtApiJack ::~RtApiJack()
2138 {
2139 	if (stream_.state != STREAM_CLOSED) closeStream();
2140 }
2141 
getDeviceCount(void)2142 unsigned int RtApiJack ::getDeviceCount(void)
2143 {
2144 	// See if we can become a jack client.
2145 	jack_options_t options = (jack_options_t)(JackNoStartServer);  //JackNullOption;
2146 	jack_status_t *status = NULL;
2147 	jack_client_t *client = jack_client_open("RtApiJackCount", options, status);
2148 	if (client == 0) return 0;
2149 
2150 	const char **ports;
2151 	std::string port, previousPort;
2152 	unsigned int nChannels = 0, nDevices = 0;
2153 	ports = jack_get_ports(client, NULL, NULL, 0);
2154 	if (ports)
2155 	{
2156 		// Parse the port names up to the first colon (:).
2157 		size_t iColon = 0;
2158 		do
2159 		{
2160 			port = (char *)ports[nChannels];
2161 			iColon = port.find(":");
2162 			if (iColon != std::string::npos)
2163 			{
2164 				port = port.substr(0, iColon + 1);
2165 				if (port != previousPort)
2166 				{
2167 					nDevices++;
2168 					previousPort = port;
2169 				}
2170 			}
2171 		} while (ports[++nChannels]);
2172 		free(ports);
2173 	}
2174 
2175 	jack_client_close(client);
2176 	return nDevices;
2177 }
2178 
// Probe the JACK "device" with the given index (a client-name group of
// ports) and fill in a DeviceInfo structure: name, input/output/duplex
// channel counts, server sample rate, and native format.  info.probed
// stays false on any failure; warnings/errors are reported via error().
RtAudio::DeviceInfo RtApiJack ::getDeviceInfo(unsigned int device)
{
	RtAudio::DeviceInfo info;
	info.probed = false;

	// Become a temporary JACK client just for probing (never auto-start
	// a server).
	jack_options_t options = (jack_options_t)(JackNoStartServer);  //JackNullOption
	jack_status_t *status = NULL;
	jack_client_t *client = jack_client_open("RtApiJackInfo", options, status);
	if (client == 0)
	{
		errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
		error(RtAudioError::WARNING);
		return info;
	}

	const char **ports;
	std::string port, previousPort;
	unsigned int nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, NULL, NULL, 0);
	if (ports)
	{
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do
		{
			port = (char *)ports[nPorts];
			iColon = port.find(":");
			if (iColon != std::string::npos)
			{
				port = port.substr(0, iColon);
				if (port != previousPort)
				{
					// Each distinct client-name prefix counts as one device.
					if (nDevices == device) info.name = port;
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}

	if (device >= nDevices)
	{
		jack_client_close(client);
		errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
		error(RtAudioError::INVALID_USE);
		return info;
	}

	// Get the current jack server sample rate.
	info.sampleRates.clear();

	info.preferredSampleRate = jack_get_sample_rate(client);
	info.sampleRates.push_back(info.preferredSampleRate);

	// Count the available ports containing the client name as device
	// channels.  Jack "input ports" equal RtAudio output channels.
	unsigned int nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsInput);
	if (ports)
	{
		while (ports[nChannels]) nChannels++;
		free(ports);
		info.outputChannels = nChannels;
	}

	// Jack "output ports" equal RtAudio input channels.
	nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsOutput);
	if (ports)
	{
		while (ports[nChannels]) nChannels++;
		free(ports);
		info.inputChannels = nChannels;
	}

	if (info.outputChannels == 0 && info.inputChannels == 0)
	{
		jack_client_close(client);
		errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
		error(RtAudioError::WARNING);
		return info;
	}

	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0)
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

	// Jack always uses 32-bit floats.
	info.nativeFormats = RTAUDIO_FLOAT32;

	// Jack doesn't provide default devices so we'll use the first available one.
	if (device == 0 && info.outputChannels > 0)
		info.isDefaultOutput = true;
	if (device == 0 && info.inputChannels > 0)
		info.isDefaultInput = true;

	jack_client_close(client);
	info.probed = true;
	return info;
}
2280 
jackCallbackHandler(jack_nframes_t nframes,void * infoPointer)2281 static int jackCallbackHandler(jack_nframes_t nframes, void *infoPointer)
2282 {
2283 	CallbackInfo *info = (CallbackInfo *)infoPointer;
2284 
2285 	RtApiJack *object = (RtApiJack *)info->object;
2286 	if (object->callbackEvent((unsigned long)nframes) == false) return 1;
2287 
2288 	return 0;
2289 }
2290 
2291 // This function will be called by a spawned thread when the Jack
2292 // server signals that it is shutting down.  It is necessary to handle
2293 // it this way because the jackShutdown() function must return before
2294 // the jack_deactivate() function (in closeStream()) will return.
jackCloseStream(void * ptr)2295 static void *jackCloseStream(void *ptr)
2296 {
2297 	CallbackInfo *info = (CallbackInfo *)ptr;
2298 	RtApiJack *object = (RtApiJack *)info->object;
2299 
2300 	object->closeStream();
2301 
2302 	pthread_exit(NULL);
2303 }
jackShutdown(void * infoPointer)2304 static void jackShutdown(void *infoPointer)
2305 {
2306 	CallbackInfo *info = (CallbackInfo *)infoPointer;
2307 	RtApiJack *object = (RtApiJack *)info->object;
2308 
2309 	// Check current stream state.  If stopped, then we'll assume this
2310 	// was called as a result of a call to RtApiJack::stopStream (the
2311 	// deactivation of a client handle causes this function to be called).
2312 	// If not, we'll assume the Jack server is shutting down or some
2313 	// other problem occurred and we should close the stream.
2314 	if (object->isStreamRunning() == false) return;
2315 
2316 	ThreadHandle threadId;
2317 	pthread_create(&threadId, NULL, jackCloseStream, info);
2318 	std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n"
2319 			  << std::endl;
2320 }
2321 
jackXrun(void * infoPointer)2322 static int jackXrun(void *infoPointer)
2323 {
2324 	JackHandle *handle = (JackHandle *)infoPointer;
2325 
2326 	if (handle->ports[0]) handle->xrun[0] = true;
2327 	if (handle->ports[1]) handle->xrun[1] = true;
2328 
2329 	return 0;
2330 }
2331 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)2332 bool RtApiJack ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
2333 								 unsigned int firstChannel, unsigned int sampleRate,
2334 								 RtAudioFormat format, unsigned int *bufferSize,
2335 								 RtAudio::StreamOptions *options)
2336 {
2337 	JackHandle *handle = (JackHandle *)stream_.apiHandle;
2338 
2339 	// Look for jack server and try to become a client (only do once per stream).
2340 	jack_client_t *client = 0;
2341 	if (mode == OUTPUT || (mode == INPUT && stream_.mode != OUTPUT))
2342 	{
2343 		jack_options_t jackoptions = (jack_options_t)(JackNoStartServer);  //JackNullOption;
2344 		jack_status_t *status = NULL;
2345 		if (options && !options->streamName.empty())
2346 			client = jack_client_open(options->streamName.c_str(), jackoptions, status);
2347 		else
2348 			client = jack_client_open("RtApiJack", jackoptions, status);
2349 		if (client == 0)
2350 		{
2351 			errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2352 			error(RtAudioError::WARNING);
2353 			return FAILURE;
2354 		}
2355 	}
2356 	else
2357 	{
2358 		// The handle must have been created on an earlier pass.
2359 		client = handle->client;
2360 	}
2361 
2362 	const char **ports;
2363 	std::string port, previousPort, deviceName;
2364 	unsigned int nPorts = 0, nDevices = 0;
2365 	ports = jack_get_ports(client, NULL, NULL, 0);
2366 	if (ports)
2367 	{
2368 		// Parse the port names up to the first colon (:).
2369 		size_t iColon = 0;
2370 		do
2371 		{
2372 			port = (char *)ports[nPorts];
2373 			iColon = port.find(":");
2374 			if (iColon != std::string::npos)
2375 			{
2376 				port = port.substr(0, iColon);
2377 				if (port != previousPort)
2378 				{
2379 					if (nDevices == device) deviceName = port;
2380 					nDevices++;
2381 					previousPort = port;
2382 				}
2383 			}
2384 		} while (ports[++nPorts]);
2385 		free(ports);
2386 	}
2387 
2388 	if (device >= nDevices)
2389 	{
2390 		errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2391 		return FAILURE;
2392 	}
2393 
2394 	// Count the available ports containing the client name as device
2395 	// channels.  Jack "input ports" equal RtAudio output channels.
2396 	unsigned int nChannels = 0;
2397 	unsigned long flag = JackPortIsInput;
2398 	if (mode == INPUT) flag = JackPortIsOutput;
2399 	ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
2400 	if (ports)
2401 	{
2402 		while (ports[nChannels]) nChannels++;
2403 		free(ports);
2404 	}
2405 
2406 	// Compare the jack ports for specified client to the requested number of channels.
2407 	if (nChannels < (channels + firstChannel))
2408 	{
2409 		errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2410 		errorText_ = errorStream_.str();
2411 		return FAILURE;
2412 	}
2413 
2414 	// Check the jack server sample rate.
2415 	unsigned int jackRate = jack_get_sample_rate(client);
2416 	if (sampleRate != jackRate)
2417 	{
2418 		jack_client_close(client);
2419 		errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2420 		errorText_ = errorStream_.str();
2421 		return FAILURE;
2422 	}
2423 	stream_.sampleRate = jackRate;
2424 
2425 	// Get the latency of the JACK port.
2426 	ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
2427 	if (ports[firstChannel])
2428 	{
2429 		// Added by Ge Wang
2430 		jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2431 		// the range (usually the min and max are equal)
2432 		jack_latency_range_t latrange;
2433 		latrange.min = latrange.max = 0;
2434 		// get the latency range
2435 		jack_port_get_latency_range(jack_port_by_name(client, ports[firstChannel]), cbmode, &latrange);
2436 		// be optimistic, use the min!
2437 		stream_.latency[mode] = latrange.min;
2438 		//stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2439 	}
2440 	free(ports);
2441 
2442 	// The jack server always uses 32-bit floating-point data.
2443 	stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2444 	stream_.userFormat = format;
2445 
2446 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
2447 		stream_.userInterleaved = false;
2448 	else
2449 		stream_.userInterleaved = true;
2450 
2451 	// Jack always uses non-interleaved buffers.
2452 	stream_.deviceInterleaved[mode] = false;
2453 
2454 	// Jack always provides host byte-ordered data.
2455 	stream_.doByteSwap[mode] = false;
2456 
2457 	// Get the buffer size.  The buffer size and number of buffers
2458 	// (periods) is set when the jack server is started.
2459 	stream_.bufferSize = (int)jack_get_buffer_size(client);
2460 	*bufferSize = stream_.bufferSize;
2461 
2462 	stream_.nDeviceChannels[mode] = channels;
2463 	stream_.nUserChannels[mode] = channels;
2464 
2465 	// Set flags for buffer conversion.
2466 	stream_.doConvertBuffer[mode] = false;
2467 	if (stream_.userFormat != stream_.deviceFormat[mode])
2468 		stream_.doConvertBuffer[mode] = true;
2469 	if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2470 		stream_.nUserChannels[mode] > 1)
2471 		stream_.doConvertBuffer[mode] = true;
2472 
2473 	// Allocate our JackHandle structure for the stream.
2474 	if (handle == 0)
2475 	{
2476 		try
2477 		{
2478 			handle = new JackHandle;
2479 		}
2480 		catch (std::bad_alloc &)
2481 		{
2482 			errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2483 			goto error;
2484 		}
2485 
2486 		if (pthread_cond_init(&handle->condition, NULL))
2487 		{
2488 			errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2489 			goto error;
2490 		}
2491 		stream_.apiHandle = (void *)handle;
2492 		handle->client = client;
2493 	}
2494 	handle->deviceName[mode] = deviceName;
2495 
2496 	// Allocate necessary internal buffers.
2497 	unsigned long bufferBytes;
2498 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
2499 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
2500 	if (stream_.userBuffer[mode] == NULL)
2501 	{
2502 		errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2503 		goto error;
2504 	}
2505 
2506 	if (stream_.doConvertBuffer[mode])
2507 	{
2508 		bool makeBuffer = true;
2509 		if (mode == OUTPUT)
2510 			bufferBytes = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2511 		else
2512 		{  // mode == INPUT
2513 			bufferBytes = stream_.nDeviceChannels[1] * formatBytes(stream_.deviceFormat[1]);
2514 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
2515 			{
2516 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2517 				if (bufferBytes < bytesOut) makeBuffer = false;
2518 			}
2519 		}
2520 
2521 		if (makeBuffer)
2522 		{
2523 			bufferBytes *= *bufferSize;
2524 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
2525 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
2526 			if (stream_.deviceBuffer == NULL)
2527 			{
2528 				errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2529 				goto error;
2530 			}
2531 		}
2532 	}
2533 
2534 	// Allocate memory for the Jack ports (channels) identifiers.
2535 	handle->ports[mode] = (jack_port_t **)malloc(sizeof(jack_port_t *) * channels);
2536 	if (handle->ports[mode] == NULL)
2537 	{
2538 		errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2539 		goto error;
2540 	}
2541 
2542 	stream_.device[mode] = device;
2543 	stream_.channelOffset[mode] = firstChannel;
2544 	stream_.state = STREAM_STOPPED;
2545 	stream_.callbackInfo.object = (void *)this;
2546 
2547 	if (stream_.mode == OUTPUT && mode == INPUT)
2548 		// We had already set up the stream for output.
2549 		stream_.mode = DUPLEX;
2550 	else
2551 	{
2552 		stream_.mode = mode;
2553 		jack_set_process_callback(handle->client, jackCallbackHandler, (void *)&stream_.callbackInfo);
2554 		jack_set_xrun_callback(handle->client, jackXrun, (void *)&handle);
2555 		jack_on_shutdown(handle->client, jackShutdown, (void *)&stream_.callbackInfo);
2556 	}
2557 
2558 	// Register our ports.
2559 	char label[64];
2560 	if (mode == OUTPUT)
2561 	{
2562 		for (unsigned int i = 0; i < stream_.nUserChannels[0]; i++)
2563 		{
2564 			snprintf(label, 64, "outport %d", i);
2565 			handle->ports[0][i] = jack_port_register(handle->client, (const char *)label,
2566 													 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0);
2567 		}
2568 	}
2569 	else
2570 	{
2571 		for (unsigned int i = 0; i < stream_.nUserChannels[1]; i++)
2572 		{
2573 			snprintf(label, 64, "inport %d", i);
2574 			handle->ports[1][i] = jack_port_register(handle->client, (const char *)label,
2575 													 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0);
2576 		}
2577 	}
2578 
2579 	// Setup the buffer conversion information structure.  We don't use
2580 	// buffers to do channel offsets, so we override that parameter
2581 	// here.
2582 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, 0);
2583 
2584 	return SUCCESS;
2585 
2586 error:
2587 	if (handle)
2588 	{
2589 		pthread_cond_destroy(&handle->condition);
2590 		jack_client_close(handle->client);
2591 
2592 		if (handle->ports[0]) free(handle->ports[0]);
2593 		if (handle->ports[1]) free(handle->ports[1]);
2594 
2595 		delete handle;
2596 		stream_.apiHandle = 0;
2597 	}
2598 
2599 	for (int i = 0; i < 2; i++)
2600 	{
2601 		if (stream_.userBuffer[i])
2602 		{
2603 			free(stream_.userBuffer[i]);
2604 			stream_.userBuffer[i] = 0;
2605 		}
2606 	}
2607 
2608 	if (stream_.deviceBuffer)
2609 	{
2610 		free(stream_.deviceBuffer);
2611 		stream_.deviceBuffer = 0;
2612 	}
2613 
2614 	return FAILURE;
2615 }
2616 
closeStream(void)2617 void RtApiJack ::closeStream(void)
2618 {
2619 	if (stream_.state == STREAM_CLOSED)
2620 	{
2621 		errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2622 		error(RtAudioError::WARNING);
2623 		return;
2624 	}
2625 
2626 	JackHandle *handle = (JackHandle *)stream_.apiHandle;
2627 	if (handle)
2628 	{
2629 		if (stream_.state == STREAM_RUNNING)
2630 			jack_deactivate(handle->client);
2631 
2632 		jack_client_close(handle->client);
2633 	}
2634 
2635 	if (handle)
2636 	{
2637 		if (handle->ports[0]) free(handle->ports[0]);
2638 		if (handle->ports[1]) free(handle->ports[1]);
2639 		pthread_cond_destroy(&handle->condition);
2640 		delete handle;
2641 		stream_.apiHandle = 0;
2642 	}
2643 
2644 	for (int i = 0; i < 2; i++)
2645 	{
2646 		if (stream_.userBuffer[i])
2647 		{
2648 			free(stream_.userBuffer[i]);
2649 			stream_.userBuffer[i] = 0;
2650 		}
2651 	}
2652 
2653 	if (stream_.deviceBuffer)
2654 	{
2655 		free(stream_.deviceBuffer);
2656 		stream_.deviceBuffer = 0;
2657 	}
2658 
2659 	stream_.mode = UNINITIALIZED;
2660 	stream_.state = STREAM_CLOSED;
2661 }
2662 
startStream(void)2663 void RtApiJack ::startStream(void)
2664 {
2665 	verifyStream();
2666 	if (stream_.state == STREAM_RUNNING)
2667 	{
2668 		errorText_ = "RtApiJack::startStream(): the stream is already running!";
2669 		error(RtAudioError::WARNING);
2670 		return;
2671 	}
2672 
2673 	JackHandle *handle = (JackHandle *)stream_.apiHandle;
2674 	int result = jack_activate(handle->client);
2675 	if (result)
2676 	{
2677 		errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2678 		goto unlock;
2679 	}
2680 
2681 	const char **ports;
2682 
2683 	// Get the list of available ports.
2684 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
2685 	{
2686 		result = 1;
2687 		ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2688 		if (ports == NULL)
2689 		{
2690 			errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2691 			goto unlock;
2692 		}
2693 
2694 		// Now make the port connections.  Since RtAudio wasn't designed to
2695 		// allow the user to select particular channels of a device, we'll
2696 		// just open the first "nChannels" ports with offset.
2697 		for (unsigned int i = 0; i < stream_.nUserChannels[0]; i++)
2698 		{
2699 			result = 1;
2700 			if (ports[stream_.channelOffset[0] + i])
2701 				result = jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[stream_.channelOffset[0] + i]);
2702 			if (result)
2703 			{
2704 				free(ports);
2705 				errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2706 				goto unlock;
2707 			}
2708 		}
2709 		free(ports);
2710 	}
2711 
2712 	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
2713 	{
2714 		result = 1;
2715 		ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput);
2716 		if (ports == NULL)
2717 		{
2718 			errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2719 			goto unlock;
2720 		}
2721 
2722 		// Now make the port connections.  See note above.
2723 		for (unsigned int i = 0; i < stream_.nUserChannels[1]; i++)
2724 		{
2725 			result = 1;
2726 			if (ports[stream_.channelOffset[1] + i])
2727 				result = jack_connect(handle->client, ports[stream_.channelOffset[1] + i], jack_port_name(handle->ports[1][i]));
2728 			if (result)
2729 			{
2730 				free(ports);
2731 				errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2732 				goto unlock;
2733 			}
2734 		}
2735 		free(ports);
2736 	}
2737 
2738 	handle->drainCounter = 0;
2739 	handle->internalDrain = false;
2740 	stream_.state = STREAM_RUNNING;
2741 
2742 unlock:
2743 	if (result == 0) return;
2744 	error(RtAudioError::SYSTEM_ERROR);
2745 }
2746 
// Stop the JACK stream.  For an output or duplex stream that is not
// already draining, request a two-cycle drain (drainCounter = 2) and
// block until the process callback signals that the final output
// buffers have been played; then deactivate the JACK client.
void RtApiJack ::stopStream(void)
{
	verifyStream();
	if (stream_.state == STREAM_STOPPED)
	{
		errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
		error(RtAudioError::WARNING);
		return;
	}

	JackHandle *handle = (JackHandle *)stream_.apiHandle;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		if (handle->drainCounter == 0)
		{
			// Ask callbackEvent() to flush the remaining output, then wait
			// for it to signal handle->condition (see drainCounter > 3 path).
			// NOTE(review): stream_.mutex is handed to pthread_cond_wait()
			// without being locked here; POSIX requires the caller to hold
			// the mutex -- confirm against the rest of the file.
			handle->drainCounter = 2;
			pthread_cond_wait(&handle->condition, &stream_.mutex);  // block until signaled
		}
	}

	jack_deactivate(handle->client);
	stream_.state = STREAM_STOPPED;
}
2770 
abortStream(void)2771 void RtApiJack ::abortStream(void)
2772 {
2773 	verifyStream();
2774 	if (stream_.state == STREAM_STOPPED)
2775 	{
2776 		errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2777 		error(RtAudioError::WARNING);
2778 		return;
2779 	}
2780 
2781 	JackHandle *handle = (JackHandle *)stream_.apiHandle;
2782 	handle->drainCounter = 2;
2783 
2784 	stopStream();
2785 }
2786 
2787 // This function will be called by a spawned thread when the user
2788 // callback function signals that the stream should be stopped or
2789 // aborted.  It is necessary to handle it this way because the
2790 // callbackEvent() function must return before the jack_deactivate()
2791 // function will return.
jackStopStream(void * ptr)2792 static void *jackStopStream(void *ptr)
2793 {
2794 	CallbackInfo *info = (CallbackInfo *)ptr;
2795 	RtApiJack *object = (RtApiJack *)info->object;
2796 
2797 	object->stopStream();
2798 	pthread_exit(NULL);
2799 }
2800 
callbackEvent(unsigned long nframes)2801 bool RtApiJack ::callbackEvent(unsigned long nframes)
2802 {
2803 	if (stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING) return SUCCESS;
2804 	if (stream_.state == STREAM_CLOSED)
2805 	{
2806 		errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2807 		error(RtAudioError::WARNING);
2808 		return FAILURE;
2809 	}
2810 	if (stream_.bufferSize != nframes)
2811 	{
2812 		errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2813 		error(RtAudioError::WARNING);
2814 		return FAILURE;
2815 	}
2816 
2817 	CallbackInfo *info = (CallbackInfo *)&stream_.callbackInfo;
2818 	JackHandle *handle = (JackHandle *)stream_.apiHandle;
2819 
2820 	// Check if we were draining the stream and signal is finished.
2821 	if (handle->drainCounter > 3)
2822 	{
2823 		ThreadHandle threadId;
2824 
2825 		stream_.state = STREAM_STOPPING;
2826 		if (handle->internalDrain == true)
2827 			pthread_create(&threadId, NULL, jackStopStream, info);
2828 		else
2829 			pthread_cond_signal(&handle->condition);
2830 		return SUCCESS;
2831 	}
2832 
2833 	// Invoke user callback first, to get fresh output data.
2834 	if (handle->drainCounter == 0)
2835 	{
2836 		RtAudioCallback callback = (RtAudioCallback)info->callback;
2837 		double streamTime = getStreamTime();
2838 		RtAudioStreamStatus status = 0;
2839 		if (stream_.mode != INPUT && handle->xrun[0] == true)
2840 		{
2841 			status |= RTAUDIO_OUTPUT_UNDERFLOW;
2842 			handle->xrun[0] = false;
2843 		}
2844 		if (stream_.mode != OUTPUT && handle->xrun[1] == true)
2845 		{
2846 			status |= RTAUDIO_INPUT_OVERFLOW;
2847 			handle->xrun[1] = false;
2848 		}
2849 		int cbReturnValue = callback(stream_.userBuffer[0], stream_.userBuffer[1],
2850 									 stream_.bufferSize, streamTime, status, info->userData);
2851 		if (cbReturnValue == 2)
2852 		{
2853 			stream_.state = STREAM_STOPPING;
2854 			handle->drainCounter = 2;
2855 			ThreadHandle id;
2856 			pthread_create(&id, NULL, jackStopStream, info);
2857 			return SUCCESS;
2858 		}
2859 		else if (cbReturnValue == 1)
2860 		{
2861 			handle->drainCounter = 1;
2862 			handle->internalDrain = true;
2863 		}
2864 	}
2865 
2866 	jack_default_audio_sample_t *jackbuffer;
2867 	unsigned long bufferBytes = nframes * sizeof(jack_default_audio_sample_t);
2868 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
2869 	{
2870 		if (handle->drainCounter > 1)
2871 		{  // write zeros to the output stream
2872 
2873 			for (unsigned int i = 0; i < stream_.nDeviceChannels[0]; i++)
2874 			{
2875 				jackbuffer = (jack_default_audio_sample_t *)jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t)nframes);
2876 				memset(jackbuffer, 0, bufferBytes);
2877 			}
2878 		}
2879 		else if (stream_.doConvertBuffer[0])
2880 		{
2881 			convertBuffer(stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0]);
2882 
2883 			for (unsigned int i = 0; i < stream_.nDeviceChannels[0]; i++)
2884 			{
2885 				jackbuffer = (jack_default_audio_sample_t *)jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t)nframes);
2886 				memcpy(jackbuffer, &stream_.deviceBuffer[i * bufferBytes], bufferBytes);
2887 			}
2888 		}
2889 		else
2890 		{  // no buffer conversion
2891 			for (unsigned int i = 0; i < stream_.nUserChannels[0]; i++)
2892 			{
2893 				jackbuffer = (jack_default_audio_sample_t *)jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t)nframes);
2894 				memcpy(jackbuffer, &stream_.userBuffer[0][i * bufferBytes], bufferBytes);
2895 			}
2896 		}
2897 	}
2898 
2899 	// Don't bother draining input
2900 	if (handle->drainCounter)
2901 	{
2902 		handle->drainCounter++;
2903 		goto unlock;
2904 	}
2905 
2906 	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
2907 	{
2908 		if (stream_.doConvertBuffer[1])
2909 		{
2910 			for (unsigned int i = 0; i < stream_.nDeviceChannels[1]; i++)
2911 			{
2912 				jackbuffer = (jack_default_audio_sample_t *)jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t)nframes);
2913 				memcpy(&stream_.deviceBuffer[i * bufferBytes], jackbuffer, bufferBytes);
2914 			}
2915 			convertBuffer(stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1]);
2916 		}
2917 		else
2918 		{  // no buffer conversion
2919 			for (unsigned int i = 0; i < stream_.nUserChannels[1]; i++)
2920 			{
2921 				jackbuffer = (jack_default_audio_sample_t *)jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t)nframes);
2922 				memcpy(&stream_.userBuffer[1][i * bufferBytes], jackbuffer, bufferBytes);
2923 			}
2924 		}
2925 	}
2926 
2927 unlock:
2928 	RtApi::tickStreamTime();
2929 	return SUCCESS;
2930 }
2931 //******************** End of __UNIX_JACK__ *********************//
2932 #endif
2933 
2934 #if defined(__WINDOWS_ASIO__)  // ASIO API on Windows
2935 
2936 // The ASIO API is designed around a callback scheme, so this
2937 // implementation is similar to that used for OS-X CoreAudio and Linux
2938 // Jack.  The primary constraint with ASIO is that it only allows
2939 // access to a single driver at a time.  Thus, it is not possible to
2940 // have more than one simultaneous RtAudio stream.
2941 //
2942 // This implementation also requires a number of external ASIO files
2943 // and a few global variables.  The ASIO callback scheme does not
2944 // allow for the passing of user data, so we must create a global
2945 // pointer to our callbackInfo structure.
2946 //
2947 // On unix systems, we make use of a pthread condition variable.
2948 // Since there is no equivalent in Windows, I hacked something based
2949 // on information found in
2950 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2951 
2952 #include "asiosys.h"
2953 #include "asio.h"
2954 #include "iasiothiscallresolver.h"
2955 #include "asiodrivers.h"
2956 #include <cmath>
2957 
// The ASIO callback scheme cannot carry user data, so this state must be
// global (see the note above); only one ASIO driver can run at a time.
static AsioDrivers drivers;             // enumerates and loads ASIO drivers
static ASIOCallbacks asioCallbacks;     // callback table passed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;       // filled in by ASIOInit()
static CallbackInfo *asioCallbackInfo;  // lets bufferSwitch() reach the RtApiAsio object
static bool asioXRun;                   // presumably set on driver xrun notification -- TODO confirm in asioMessages()
2963 
// Per-stream bookkeeping for the ASIO backend, stored in stream_.apiHandle.
struct AsioHandle
{
	int drainCounter;    // Tracks callback counts when draining
	bool internalDrain;  // Indicates if stop is initiated from callback or not.
	ASIOBufferInfo *bufferInfos;  // per-channel buffer descriptors for ASIOCreateBuffers()
	HANDLE condition;             // manual-reset event created in probeDeviceOpen();
	                              // intentionally not initialized by this constructor

	AsioHandle()
		: drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
2974 
2975 // Function declarations (definitions at end of section)
2976 static const char *getAsioErrorString(ASIOError result);
2977 static void sampleRateChanged(ASIOSampleRate sRate);
2978 static long asioMessages(long selector, long value, void *message, double *opt);
2979 
RtApiAsio()2980 RtApiAsio ::RtApiAsio()
2981 {
2982 	// ASIO cannot run on a multi-threaded appartment. You can call
2983 	// CoInitialize beforehand, but it must be for appartment threading
2984 	// (in which case, CoInitilialize will return S_FALSE here).
2985 	coInitialized_ = false;
2986 	HRESULT hr = CoInitialize(NULL);
2987 	if (FAILED(hr))
2988 	{
2989 		errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2990 		error(RtAudioError::WARNING);
2991 	}
2992 	coInitialized_ = true;
2993 
2994 	drivers.removeCurrentDriver();
2995 	driverInfo.asioVersion = 2;
2996 
2997 	// See note in DirectSound implementation about GetDesktopWindow().
2998 	driverInfo.sysRef = GetForegroundWindow();
2999 }
3000 
~RtApiAsio()3001 RtApiAsio ::~RtApiAsio()
3002 {
3003 	if (stream_.state != STREAM_CLOSED) closeStream();
3004 	if (coInitialized_) CoUninitialize();
3005 }
3006 
getDeviceCount(void)3007 unsigned int RtApiAsio ::getDeviceCount(void)
3008 {
3009 	return (unsigned int)drivers.asioGetNumDev();
3010 }
3011 
getDeviceInfo(unsigned int device)3012 RtAudio::DeviceInfo RtApiAsio ::getDeviceInfo(unsigned int device)
3013 {
3014 	RtAudio::DeviceInfo info;
3015 	info.probed = false;
3016 
3017 	// Get device ID
3018 	unsigned int nDevices = getDeviceCount();
3019 	if (nDevices == 0)
3020 	{
3021 		errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
3022 		error(RtAudioError::INVALID_USE);
3023 		return info;
3024 	}
3025 
3026 	if (device >= nDevices)
3027 	{
3028 		errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
3029 		error(RtAudioError::INVALID_USE);
3030 		return info;
3031 	}
3032 
3033 	// If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
3034 	if (stream_.state != STREAM_CLOSED)
3035 	{
3036 		if (device >= devices_.size())
3037 		{
3038 			errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
3039 			error(RtAudioError::WARNING);
3040 			return info;
3041 		}
3042 		return devices_[device];
3043 	}
3044 
3045 	char driverName[32];
3046 	ASIOError result = drivers.asioGetDriverName((int)device, driverName, 32);
3047 	if (result != ASE_OK)
3048 	{
3049 		errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString(result) << ").";
3050 		errorText_ = errorStream_.str();
3051 		error(RtAudioError::WARNING);
3052 		return info;
3053 	}
3054 
3055 	info.name = driverName;
3056 
3057 	if (!drivers.loadDriver(driverName))
3058 	{
3059 		errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
3060 		errorText_ = errorStream_.str();
3061 		error(RtAudioError::WARNING);
3062 		return info;
3063 	}
3064 
3065 	result = ASIOInit(&driverInfo);
3066 	if (result != ASE_OK)
3067 	{
3068 		errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").";
3069 		errorText_ = errorStream_.str();
3070 		error(RtAudioError::WARNING);
3071 		return info;
3072 	}
3073 
3074 	// Determine the device channel information.
3075 	long inputChannels, outputChannels;
3076 	result = ASIOGetChannels(&inputChannels, &outputChannels);
3077 	if (result != ASE_OK)
3078 	{
3079 		drivers.removeCurrentDriver();
3080 		errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").";
3081 		errorText_ = errorStream_.str();
3082 		error(RtAudioError::WARNING);
3083 		return info;
3084 	}
3085 
3086 	info.outputChannels = outputChannels;
3087 	info.inputChannels = inputChannels;
3088 	if (info.outputChannels > 0 && info.inputChannels > 0)
3089 		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
3090 
3091 	// Determine the supported sample rates.
3092 	info.sampleRates.clear();
3093 	for (unsigned int i = 0; i < MAX_SAMPLE_RATES; i++)
3094 	{
3095 		result = ASIOCanSampleRate((ASIOSampleRate)SAMPLE_RATES[i]);
3096 		if (result == ASE_OK)
3097 		{
3098 			info.sampleRates.push_back(SAMPLE_RATES[i]);
3099 
3100 			if (!info.preferredSampleRate || (SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate))
3101 				info.preferredSampleRate = SAMPLE_RATES[i];
3102 		}
3103 	}
3104 
3105 	// Determine supported data types ... just check first channel and assume rest are the same.
3106 	ASIOChannelInfo channelInfo;
3107 	channelInfo.channel = 0;
3108 	channelInfo.isInput = true;
3109 	if (info.inputChannels <= 0) channelInfo.isInput = false;
3110 	result = ASIOGetChannelInfo(&channelInfo);
3111 	if (result != ASE_OK)
3112 	{
3113 		drivers.removeCurrentDriver();
3114 		errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString(result) << ") getting driver channel info (" << driverName << ").";
3115 		errorText_ = errorStream_.str();
3116 		error(RtAudioError::WARNING);
3117 		return info;
3118 	}
3119 
3120 	info.nativeFormats = 0;
3121 	if (channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB)
3122 		info.nativeFormats |= RTAUDIO_SINT16;
3123 	else if (channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB)
3124 		info.nativeFormats |= RTAUDIO_SINT32;
3125 	else if (channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB)
3126 		info.nativeFormats |= RTAUDIO_FLOAT32;
3127 	else if (channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB)
3128 		info.nativeFormats |= RTAUDIO_FLOAT64;
3129 	else if (channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB)
3130 		info.nativeFormats |= RTAUDIO_SINT24;
3131 
3132 	if (info.outputChannels > 0)
3133 		if (getDefaultOutputDevice() == device) info.isDefaultOutput = true;
3134 	if (info.inputChannels > 0)
3135 		if (getDefaultInputDevice() == device) info.isDefaultInput = true;
3136 
3137 	info.probed = true;
3138 	drivers.removeCurrentDriver();
3139 	return info;
3140 }
3141 
bufferSwitch(long index,ASIOBool)3142 static void bufferSwitch(long index, ASIOBool /*processNow*/)
3143 {
3144 	RtApiAsio *object = (RtApiAsio *)asioCallbackInfo->object;
3145 	object->callbackEvent(index);
3146 }
3147 
saveDeviceInfo(void)3148 void RtApiAsio ::saveDeviceInfo(void)
3149 {
3150 	devices_.clear();
3151 
3152 	unsigned int nDevices = getDeviceCount();
3153 	devices_.resize(nDevices);
3154 	for (unsigned int i = 0; i < nDevices; i++)
3155 		devices_[i] = getDeviceInfo(i);
3156 }
3157 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)3158 bool RtApiAsio ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
3159 								 unsigned int firstChannel, unsigned int sampleRate,
3160 								 RtAudioFormat format, unsigned int *bufferSize,
3161 								 RtAudio::StreamOptions *options)
3162 {  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3163 
3164 	bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3165 
3166 	// For ASIO, a duplex stream MUST use the same driver.
3167 	if (isDuplexInput && stream_.device[0] != device)
3168 	{
3169 		errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3170 		return FAILURE;
3171 	}
3172 
3173 	char driverName[32];
3174 	ASIOError result = drivers.asioGetDriverName((int)device, driverName, 32);
3175 	if (result != ASE_OK)
3176 	{
3177 		errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString(result) << ").";
3178 		errorText_ = errorStream_.str();
3179 		return FAILURE;
3180 	}
3181 
3182 	// Only load the driver once for duplex stream.
3183 	if (!isDuplexInput)
3184 	{
3185 		// The getDeviceInfo() function will not work when a stream is open
3186 		// because ASIO does not allow multiple devices to run at the same
3187 		// time.  Thus, we'll probe the system before opening a stream and
3188 		// save the results for use by getDeviceInfo().
3189 		this->saveDeviceInfo();
3190 
3191 		if (!drivers.loadDriver(driverName))
3192 		{
3193 			errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3194 			errorText_ = errorStream_.str();
3195 			return FAILURE;
3196 		}
3197 
3198 		result = ASIOInit(&driverInfo);
3199 		if (result != ASE_OK)
3200 		{
3201 			errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").";
3202 			errorText_ = errorStream_.str();
3203 			return FAILURE;
3204 		}
3205 	}
3206 
3207 	// keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3208 	bool buffersAllocated = false;
3209 	AsioHandle *handle = (AsioHandle *)stream_.apiHandle;
3210 	unsigned int nChannels;
3211 
3212 	// Check the device channel count.
3213 	long inputChannels, outputChannels;
3214 	result = ASIOGetChannels(&inputChannels, &outputChannels);
3215 	if (result != ASE_OK)
3216 	{
3217 		errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").";
3218 		errorText_ = errorStream_.str();
3219 		goto error;
3220 	}
3221 
3222 	if ((mode == OUTPUT && (channels + firstChannel) > (unsigned int)outputChannels) ||
3223 		(mode == INPUT && (channels + firstChannel) > (unsigned int)inputChannels))
3224 	{
3225 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3226 		errorText_ = errorStream_.str();
3227 		goto error;
3228 	}
3229 	stream_.nDeviceChannels[mode] = channels;
3230 	stream_.nUserChannels[mode] = channels;
3231 	stream_.channelOffset[mode] = firstChannel;
3232 
3233 	// Verify the sample rate is supported.
3234 	result = ASIOCanSampleRate((ASIOSampleRate)sampleRate);
3235 	if (result != ASE_OK)
3236 	{
3237 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3238 		errorText_ = errorStream_.str();
3239 		goto error;
3240 	}
3241 
3242 	// Get the current sample rate
3243 	ASIOSampleRate currentRate;
3244 	result = ASIOGetSampleRate(&currentRate);
3245 	if (result != ASE_OK)
3246 	{
3247 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3248 		errorText_ = errorStream_.str();
3249 		goto error;
3250 	}
3251 
3252 	// Set the sample rate only if necessary
3253 	if (currentRate != sampleRate)
3254 	{
3255 		result = ASIOSetSampleRate((ASIOSampleRate)sampleRate);
3256 		if (result != ASE_OK)
3257 		{
3258 			errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3259 			errorText_ = errorStream_.str();
3260 			goto error;
3261 		}
3262 	}
3263 
3264 	// Determine the driver data type.
3265 	ASIOChannelInfo channelInfo;
3266 	channelInfo.channel = 0;
3267 	if (mode == OUTPUT)
3268 		channelInfo.isInput = false;
3269 	else
3270 		channelInfo.isInput = true;
3271 	result = ASIOGetChannelInfo(&channelInfo);
3272 	if (result != ASE_OK)
3273 	{
3274 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting data format.";
3275 		errorText_ = errorStream_.str();
3276 		goto error;
3277 	}
3278 
3279 	// Assuming WINDOWS host is always little-endian.
3280 	stream_.doByteSwap[mode] = false;
3281 	stream_.userFormat = format;
3282 	stream_.deviceFormat[mode] = 0;
3283 	if (channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB)
3284 	{
3285 		stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3286 		if (channelInfo.type == ASIOSTInt16MSB) stream_.doByteSwap[mode] = true;
3287 	}
3288 	else if (channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB)
3289 	{
3290 		stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3291 		if (channelInfo.type == ASIOSTInt32MSB) stream_.doByteSwap[mode] = true;
3292 	}
3293 	else if (channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB)
3294 	{
3295 		stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3296 		if (channelInfo.type == ASIOSTFloat32MSB) stream_.doByteSwap[mode] = true;
3297 	}
3298 	else if (channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB)
3299 	{
3300 		stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3301 		if (channelInfo.type == ASIOSTFloat64MSB) stream_.doByteSwap[mode] = true;
3302 	}
3303 	else if (channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB)
3304 	{
3305 		stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3306 		if (channelInfo.type == ASIOSTInt24MSB) stream_.doByteSwap[mode] = true;
3307 	}
3308 
3309 	if (stream_.deviceFormat[mode] == 0)
3310 	{
3311 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3312 		errorText_ = errorStream_.str();
3313 		goto error;
3314 	}
3315 
3316 	// Set the buffer size.  For a duplex stream, this will end up
3317 	// setting the buffer size based on the input constraints, which
3318 	// should be ok.
3319 	long minSize, maxSize, preferSize, granularity;
3320 	result = ASIOGetBufferSize(&minSize, &maxSize, &preferSize, &granularity);
3321 	if (result != ASE_OK)
3322 	{
3323 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting buffer size.";
3324 		errorText_ = errorStream_.str();
3325 		goto error;
3326 	}
3327 
3328 	if (isDuplexInput)
3329 	{
3330 		// When this is the duplex input (output was opened before), then we have to use the same
3331 		// buffersize as the output, because it might use the preferred buffer size, which most
3332 		// likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3333 		// So instead of throwing an error, make them equal. The caller uses the reference
3334 		// to the "bufferSize" param as usual to set up processing buffers.
3335 
3336 		*bufferSize = stream_.bufferSize;
3337 	}
3338 	else
3339 	{
3340 		if (*bufferSize == 0)
3341 			*bufferSize = preferSize;
3342 		else if (*bufferSize < (unsigned int)minSize)
3343 			*bufferSize = (unsigned int)minSize;
3344 		else if (*bufferSize > (unsigned int)maxSize)
3345 			*bufferSize = (unsigned int)maxSize;
3346 		else if (granularity == -1)
3347 		{
3348 			// Make sure bufferSize is a power of two.
3349 			int log2_of_min_size = 0;
3350 			int log2_of_max_size = 0;
3351 
3352 			for (unsigned int i = 0; i < sizeof(long) * 8; i++)
3353 			{
3354 				if (minSize & ((long)1 << i)) log2_of_min_size = i;
3355 				if (maxSize & ((long)1 << i)) log2_of_max_size = i;
3356 			}
3357 
3358 			long min_delta = std::abs((long)*bufferSize - ((long)1 << log2_of_min_size));
3359 			int min_delta_num = log2_of_min_size;
3360 
3361 			for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++)
3362 			{
3363 				long current_delta = std::abs((long)*bufferSize - ((long)1 << i));
3364 				if (current_delta < min_delta)
3365 				{
3366 					min_delta = current_delta;
3367 					min_delta_num = i;
3368 				}
3369 			}
3370 
3371 			*bufferSize = ((unsigned int)1 << min_delta_num);
3372 			if (*bufferSize < (unsigned int)minSize)
3373 				*bufferSize = (unsigned int)minSize;
3374 			else if (*bufferSize > (unsigned int)maxSize)
3375 				*bufferSize = (unsigned int)maxSize;
3376 		}
3377 		else if (granularity != 0)
3378 		{
3379 			// Set to an even multiple of granularity, rounding up.
3380 			*bufferSize = (*bufferSize + granularity - 1) / granularity * granularity;
3381 		}
3382 	}
3383 
3384 	/*
3385   // we don't use it anymore, see above!
3386   // Just left it here for the case...
3387   if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3388     errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3389     goto error;
3390   }
3391   */
3392 
3393 	stream_.bufferSize = *bufferSize;
3394 	stream_.nBuffers = 2;
3395 
3396 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
3397 		stream_.userInterleaved = false;
3398 	else
3399 		stream_.userInterleaved = true;
3400 
3401 	// ASIO always uses non-interleaved buffers.
3402 	stream_.deviceInterleaved[mode] = false;
3403 
3404 	// Allocate, if necessary, our AsioHandle structure for the stream.
3405 	if (handle == 0)
3406 	{
3407 		try
3408 		{
3409 			handle = new AsioHandle;
3410 		}
3411 		catch (std::bad_alloc &)
3412 		{
3413 			errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3414 			goto error;
3415 		}
3416 		handle->bufferInfos = 0;
3417 
3418 		// Create a manual-reset event.
3419 		handle->condition = CreateEvent(NULL,   // no security
3420 										TRUE,   // manual-reset
3421 										FALSE,  // non-signaled initially
3422 										NULL);  // unnamed
3423 		stream_.apiHandle = (void *)handle;
3424 	}
3425 
3426 	// Create the ASIO internal buffers.  Since RtAudio sets up input
3427 	// and output separately, we'll have to dispose of previously
3428 	// created output buffers for a duplex stream.
3429 	if (mode == INPUT && stream_.mode == OUTPUT)
3430 	{
3431 		ASIODisposeBuffers();
3432 		if (handle->bufferInfos) free(handle->bufferInfos);
3433 	}
3434 
3435 	// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3436 	unsigned int i;
3437 	nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3438 	handle->bufferInfos = (ASIOBufferInfo *)malloc(nChannels * sizeof(ASIOBufferInfo));
3439 	if (handle->bufferInfos == NULL)
3440 	{
3441 		errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3442 		errorText_ = errorStream_.str();
3443 		goto error;
3444 	}
3445 
3446 	ASIOBufferInfo *infos;
3447 	infos = handle->bufferInfos;
3448 	for (i = 0; i < stream_.nDeviceChannels[0]; i++, infos++)
3449 	{
3450 		infos->isInput = ASIOFalse;
3451 		infos->channelNum = i + stream_.channelOffset[0];
3452 		infos->buffers[0] = infos->buffers[1] = 0;
3453 	}
3454 	for (i = 0; i < stream_.nDeviceChannels[1]; i++, infos++)
3455 	{
3456 		infos->isInput = ASIOTrue;
3457 		infos->channelNum = i + stream_.channelOffset[1];
3458 		infos->buffers[0] = infos->buffers[1] = 0;
3459 	}
3460 
3461 	// prepare for callbacks
3462 	stream_.sampleRate = sampleRate;
3463 	stream_.device[mode] = device;
3464 	stream_.mode = isDuplexInput ? DUPLEX : mode;
3465 
3466 	// store this class instance before registering callbacks, that are going to use it
3467 	asioCallbackInfo = &stream_.callbackInfo;
3468 	stream_.callbackInfo.object = (void *)this;
3469 
3470 	// Set up the ASIO callback structure and create the ASIO data buffers.
3471 	asioCallbacks.bufferSwitch = &bufferSwitch;
3472 	asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3473 	asioCallbacks.asioMessage = &asioMessages;
3474 	asioCallbacks.bufferSwitchTimeInfo = NULL;
3475 	result = ASIOCreateBuffers(handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks);
3476 	if (result != ASE_OK)
3477 	{
3478 		// Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3479 		// but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
		// in that case, let's be naive and try that instead
3481 		*bufferSize = preferSize;
3482 		stream_.bufferSize = *bufferSize;
3483 		result = ASIOCreateBuffers(handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks);
3484 	}
3485 
3486 	if (result != ASE_OK)
3487 	{
3488 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers.";
3489 		errorText_ = errorStream_.str();
3490 		goto error;
3491 	}
3492 	buffersAllocated = true;
3493 	stream_.state = STREAM_STOPPED;
3494 
3495 	// Set flags for buffer conversion.
3496 	stream_.doConvertBuffer[mode] = false;
3497 	if (stream_.userFormat != stream_.deviceFormat[mode])
3498 		stream_.doConvertBuffer[mode] = true;
3499 	if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3500 		stream_.nUserChannels[mode] > 1)
3501 		stream_.doConvertBuffer[mode] = true;
3502 
3503 	// Allocate necessary internal buffers
3504 	unsigned long bufferBytes;
3505 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
3506 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
3507 	if (stream_.userBuffer[mode] == NULL)
3508 	{
3509 		errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3510 		goto error;
3511 	}
3512 
3513 	if (stream_.doConvertBuffer[mode])
3514 	{
3515 		bool makeBuffer = true;
3516 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
3517 		if (isDuplexInput && stream_.deviceBuffer)
3518 		{
3519 			unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
3520 			if (bufferBytes <= bytesOut) makeBuffer = false;
3521 		}
3522 
3523 		if (makeBuffer)
3524 		{
3525 			bufferBytes *= *bufferSize;
3526 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
3527 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
3528 			if (stream_.deviceBuffer == NULL)
3529 			{
3530 				errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3531 				goto error;
3532 			}
3533 		}
3534 	}
3535 
3536 	// Determine device latencies
3537 	long inputLatency, outputLatency;
3538 	result = ASIOGetLatencies(&inputLatency, &outputLatency);
3539 	if (result != ASE_OK)
3540 	{
3541 		errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting latency.";
3542 		errorText_ = errorStream_.str();
3543 		error(RtAudioError::WARNING);  // warn but don't fail
3544 	}
3545 	else
3546 	{
3547 		stream_.latency[0] = outputLatency;
3548 		stream_.latency[1] = inputLatency;
3549 	}
3550 
3551 	// Setup the buffer conversion information structure.  We don't use
3552 	// buffers to do channel offsets, so we override that parameter
3553 	// here.
3554 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, 0);
3555 
3556 	return SUCCESS;
3557 
3558 error:
3559 	if (!isDuplexInput)
3560 	{
3561 		// the cleanup for error in the duplex input, is done by RtApi::openStream
3562 		// So we clean up for single channel only
3563 
3564 		if (buffersAllocated)
3565 			ASIODisposeBuffers();
3566 
3567 		drivers.removeCurrentDriver();
3568 
3569 		if (handle)
3570 		{
3571 			CloseHandle(handle->condition);
3572 			if (handle->bufferInfos)
3573 				free(handle->bufferInfos);
3574 
3575 			delete handle;
3576 			stream_.apiHandle = 0;
3577 		}
3578 
3579 		if (stream_.userBuffer[mode])
3580 		{
3581 			free(stream_.userBuffer[mode]);
3582 			stream_.userBuffer[mode] = 0;
3583 		}
3584 
3585 		if (stream_.deviceBuffer)
3586 		{
3587 			free(stream_.deviceBuffer);
3588 			stream_.deviceBuffer = 0;
3589 		}
3590 	}
3591 
3592 	return FAILURE;
3593 }  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3594 
// Close the current ASIO stream: stop the driver if needed, release the
// driver-side buffers, unload the driver, and free all internally allocated
// resources.  Issues a warning (not a hard error) if no stream is open.
void RtApiAsio ::closeStream()
{
	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
		error(RtAudioError::WARNING);
		return;
	}

	// Mark the stream stopped *before* calling ASIOStop() so that any
	// concurrent buffer-switch callback bails out early (see callbackEvent()).
	if (stream_.state == STREAM_RUNNING)
	{
		stream_.state = STREAM_STOPPED;
		ASIOStop();
	}
	ASIODisposeBuffers();
	drivers.removeCurrentDriver();

	// Free the per-stream ASIO bookkeeping: signaling event + bufferInfo array.
	AsioHandle *handle = (AsioHandle *)stream_.apiHandle;
	if (handle)
	{
		CloseHandle(handle->condition);
		if (handle->bufferInfos)
			free(handle->bufferInfos);
		delete handle;
		stream_.apiHandle = 0;
	}

	// Free user-side buffers for both directions (0 = output, 1 = input).
	for (int i = 0; i < 2; i++)
	{
		if (stream_.userBuffer[i])
		{
			free(stream_.userBuffer[i]);
			stream_.userBuffer[i] = 0;
		}
	}

	if (stream_.deviceBuffer)
	{
		free(stream_.deviceBuffer);
		stream_.deviceBuffer = 0;
	}

	stream_.mode = UNINITIALIZED;
	stream_.state = STREAM_CLOSED;
}
3640 
3641 bool stopThreadCalled = false;
3642 
// Start the ASIO driver for the currently open stream.  Emits a WARNING if
// the stream is already running and a SYSTEM_ERROR if ASIOStart() fails.
void RtApiAsio ::startStream()
{
	verifyStream();
	if (stream_.state == STREAM_RUNNING)
	{
		errorText_ = "RtApiAsio::startStream(): the stream is already running!";
		error(RtAudioError::WARNING);
		return;
	}

	AsioHandle *handle = (AsioHandle *)stream_.apiHandle;
	ASIOError result = ASIOStart();
	if (result != ASE_OK)
	{
		errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString(result) << ") starting device.";
		errorText_ = errorStream_.str();
		goto unlock;
	}

	// Reset drain bookkeeping and the stop-signal event before marking the
	// stream running; callbackEvent() relies on these being in a known state.
	handle->drainCounter = 0;
	handle->internalDrain = false;
	ResetEvent(handle->condition);
	stream_.state = STREAM_RUNNING;
	asioXRun = false;

unlock:
	stopThreadCalled = false;

	if (result == ASE_OK) return;
	error(RtAudioError::SYSTEM_ERROR);
}
3674 
// Stop the ASIO driver for the currently open stream.  For output/duplex
// streams this first requests a drain (drainCounter = 2 makes the callback
// write zeros to the output) and blocks on handle->condition until the
// callback signals, then calls ASIOStop().
void RtApiAsio ::stopStream()
{
	verifyStream();
	if (stream_.state == STREAM_STOPPED)
	{
		errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
		error(RtAudioError::WARNING);
		return;
	}

	AsioHandle *handle = (AsioHandle *)stream_.apiHandle;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// drainCounter == 0 means the callback is still producing audio;
		// request the drain and wait for callbackEvent() to SetEvent() us.
		if (handle->drainCounter == 0)
		{
			handle->drainCounter = 2;
			WaitForSingleObject(handle->condition, INFINITE);  // block until signaled
		}
	}

	stream_.state = STREAM_STOPPED;

	ASIOError result = ASIOStop();
	if (result != ASE_OK)
	{
		errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString(result) << ") stopping device.";
		errorText_ = errorStream_.str();
	}

	if (result == ASE_OK) return;
	error(RtAudioError::SYSTEM_ERROR);
}
3707 
abortStream()3708 void RtApiAsio ::abortStream()
3709 {
3710 	verifyStream();
3711 	if (stream_.state == STREAM_STOPPED)
3712 	{
3713 		errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3714 		error(RtAudioError::WARNING);
3715 		return;
3716 	}
3717 
3718 	// The following lines were commented-out because some behavior was
3719 	// noted where the device buffers need to be zeroed to avoid
3720 	// continuing sound, even when the device buffers are completely
3721 	// disposed.  So now, calling abort is the same as calling stop.
3722 	// AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3723 	// handle->drainCounter = 2;
3724 	stopStream();
3725 }
3726 
3727 // This function will be called by a spawned thread when the user
3728 // callback function signals that the stream should be stopped or
3729 // aborted.  It is necessary to handle it this way because the
3730 // callbackEvent() function must return before the ASIOStop()
3731 // function will return.
asioStopStream(void * ptr)3732 static unsigned __stdcall asioStopStream(void *ptr)
3733 {
3734 	CallbackInfo *info = (CallbackInfo *)ptr;
3735 	RtApiAsio *object = (RtApiAsio *)info->object;
3736 
3737 	object->stopStream();
3738 	_endthreadex(0);
3739 	return 0;
3740 }
3741 
// Core ASIO buffer-switch handler, invoked each time the driver swaps its
// double buffers.  bufferIndex selects which half (0 or 1) of each channel's
// buffer pair to read/write.  Invokes the user callback, moves audio between
// the user/device buffers and the per-channel ASIO buffers, and manages the
// drain/stop handshake.  Returns SUCCESS or FAILURE.
bool RtApiAsio ::callbackEvent(long bufferIndex)
{
	if (stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING) return SUCCESS;
	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(RtAudioError::WARNING);
		return FAILURE;
	}

	CallbackInfo *info = (CallbackInfo *)&stream_.callbackInfo;
	AsioHandle *handle = (AsioHandle *)stream_.apiHandle;

	// Check if we were draining the stream and signal if finished.
	if (handle->drainCounter > 3)
	{
		stream_.state = STREAM_STOPPING;
		if (handle->internalDrain == false)
			// stopStream() is blocked waiting on this event; wake it.
			SetEvent(handle->condition);
		else
		{  // spawn a thread to stop the stream
			unsigned threadId;
			stream_.callbackInfo.thread = _beginthreadex(NULL, 0, &asioStopStream,
														 &stream_.callbackInfo, 0, &threadId);
		}
		return SUCCESS;
	}

	// Invoke user callback to get fresh output data UNLESS we are
	// draining stream.
	if (handle->drainCounter == 0)
	{
		RtAudioCallback callback = (RtAudioCallback)info->callback;
		double streamTime = getStreamTime();
		RtAudioStreamStatus status = 0;
		if (stream_.mode != INPUT && asioXRun == true)
		{
			status |= RTAUDIO_OUTPUT_UNDERFLOW;
			asioXRun = false;
		}
		if (stream_.mode != OUTPUT && asioXRun == true)
		{
			status |= RTAUDIO_INPUT_OVERFLOW;
			asioXRun = false;
		}
		int cbReturnValue = callback(stream_.userBuffer[0], stream_.userBuffer[1],
									 stream_.bufferSize, streamTime, status, info->userData);
		// Callback return value 2: abort now.  Stopping must happen on a
		// separate thread since ASIOStop() can't finish until we return.
		if (cbReturnValue == 2)
		{
			stream_.state = STREAM_STOPPING;
			handle->drainCounter = 2;
			unsigned threadId;
			stream_.callbackInfo.thread = _beginthreadex(NULL, 0, &asioStopStream,
														 &stream_.callbackInfo, 0, &threadId);
			return SUCCESS;
		}
		// Callback return value 1: graceful stop after output drains.
		else if (cbReturnValue == 1)
		{
			handle->drainCounter = 1;
			handle->internalDrain = true;
		}
	}

	unsigned int nChannels, bufferBytes, i, j;
	nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// bufferBytes here is the per-channel output size in bytes.
		bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[0]);

		if (handle->drainCounter > 1)
		{  // write zeros to the output stream

			for (i = 0, j = 0; i < nChannels; i++)
			{
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memset(handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes);
			}
		}
		else if (stream_.doConvertBuffer[0])
		{
			// Convert format/interleaving from user buffer into the device
			// buffer, then copy each channel into its ASIO buffer half.
			convertBuffer(stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0]);
			if (stream_.doByteSwap[0])
				byteSwapBuffer(stream_.deviceBuffer,
							   stream_.bufferSize * stream_.nDeviceChannels[0],
							   stream_.deviceFormat[0]);

			for (i = 0, j = 0; i < nChannels; i++)
			{
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memcpy(handle->bufferInfos[i].buffers[bufferIndex],
						   &stream_.deviceBuffer[j++ * bufferBytes], bufferBytes);
			}
		}
		else
		{
			// No conversion needed: copy straight from the user buffer.
			if (stream_.doByteSwap[0])
				byteSwapBuffer(stream_.userBuffer[0],
							   stream_.bufferSize * stream_.nUserChannels[0],
							   stream_.userFormat);

			for (i = 0, j = 0; i < nChannels; i++)
			{
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memcpy(handle->bufferInfos[i].buffers[bufferIndex],
						   &stream_.userBuffer[0][bufferBytes * j++], bufferBytes);
			}
		}
	}

	// Don't bother draining input
	if (handle->drainCounter)
	{
		handle->drainCounter++;
		goto unlock;
	}

	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
	{
		// bufferBytes here is the per-channel input size in bytes.
		bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

		if (stream_.doConvertBuffer[1])
		{
			// Always interleave ASIO input data.
			for (i = 0, j = 0; i < nChannels; i++)
			{
				if (handle->bufferInfos[i].isInput == ASIOTrue)
					memcpy(&stream_.deviceBuffer[j++ * bufferBytes],
						   handle->bufferInfos[i].buffers[bufferIndex],
						   bufferBytes);
			}

			if (stream_.doByteSwap[1])
				byteSwapBuffer(stream_.deviceBuffer,
							   stream_.bufferSize * stream_.nDeviceChannels[1],
							   stream_.deviceFormat[1]);
			convertBuffer(stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1]);
		}
		else
		{
			// Copy each input channel's ASIO buffer directly to the user buffer.
			for (i = 0, j = 0; i < nChannels; i++)
			{
				if (handle->bufferInfos[i].isInput == ASIOTrue)
				{
					memcpy(&stream_.userBuffer[1][bufferBytes * j++],
						   handle->bufferInfos[i].buffers[bufferIndex],
						   bufferBytes);
				}
			}

			if (stream_.doByteSwap[1])
				byteSwapBuffer(stream_.userBuffer[1],
							   stream_.bufferSize * stream_.nUserChannels[1],
							   stream_.userFormat);
		}
	}

unlock:
	// The following call was suggested by Malte Clasen.  While the API
	// documentation indicates it should not be required, some device
	// drivers apparently do not function correctly without it.
	ASIOOutputReady();

	RtApi::tickStreamTime();
	return SUCCESS;
}
3907 
sampleRateChanged(ASIOSampleRate sRate)3908 static void sampleRateChanged(ASIOSampleRate sRate)
3909 {
3910 	// The ASIO documentation says that this usually only happens during
3911 	// external sync.  Audio processing is not stopped by the driver,
3912 	// actual sample rate might not have even changed, maybe only the
3913 	// sample rate status of an AES/EBU or S/PDIF digital input at the
3914 	// audio device.
3915 
3916 	RtApi *object = (RtApi *)asioCallbackInfo->object;
3917 	try
3918 	{
3919 		object->stopStream();
3920 	}
3921 	catch (RtAudioError &exception)
3922 	{
3923 		std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n"
3924 				  << std::endl;
3925 		return;
3926 	}
3927 
3928 	std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n"
3929 			  << std::endl;
3930 }
3931 
// ASIO driver-to-host message callback.  Returns 1 (or a version number) for
// selectors we support/handle and 0 otherwise.
static long asioMessages(long selector, long value, void * /*message*/, double * /*opt*/)
{
	long ret = 0;

	switch (selector)
	{
		case kAsioSelectorSupported:
			// The driver is asking which of the selectors below we handle.
			if (value == kAsioResetRequest || value == kAsioEngineVersion || value == kAsioResyncRequest || value == kAsioLatenciesChanged
				// The following three were added for ASIO 2.0, you don't
				// necessarily have to support them.
				|| value == kAsioSupportsTimeInfo || value == kAsioSupportsTimeCode || value == kAsioSupportsInputMonitor)
				ret = 1L;
			break;
		case kAsioResetRequest:
			// Defer the task and perform the reset of the driver during the
			// next "safe" situation.  You cannot reset the driver right now,
			// as this code is called from the driver.  Resetting the driver
			// means completely destructing it, i.e. ASIOStop(),
			// ASIODisposeBuffers(), destruction; afterwards you initialize
			// the driver again.
			std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
			ret = 1L;
			break;
		case kAsioResyncRequest:
			// This informs the application that the driver encountered some
			// non-fatal data loss.  It is used for synchronization purposes
			// of different media.  Added mainly to work around the Win16Mutex
			// problems in Windows 95/98 with the Windows Multimedia system,
			// which could lose data because the Mutex was held too long by
			// another thread.  However a driver can issue it in other
			// situations, too.
			// std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
			asioXRun = true;
			ret = 1L;
			break;
		case kAsioLatenciesChanged:
			// This informs the host application that the driver's latencies
			// have changed.  Beware, this does not mean that the buffer
			// sizes have changed!  You might need to update internal delay
			// data.
			std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
			ret = 1L;
			break;
		case kAsioEngineVersion:
			// Return the supported ASIO version of the host application.  If
			// a host application does not implement this selector, ASIO 1.0
			// is assumed by the driver.
			ret = 2L;
			break;
		case kAsioSupportsTimeInfo:
			// Informs the driver whether the
			// asioCallbacks.bufferSwitchTimeInfo() callback is supported.
			// For compatibility with ASIO 1.0 drivers the host application
			// should always support the "old" bufferSwitch method, too.
			ret = 0;
			break;
		case kAsioSupportsTimeCode:
			// Informs the driver whether application is interested in time
			// code info.  If an application does not need to know about time
			// code, the driver has less work to do.
			ret = 0;
			break;
	}
	return ret;
}
3997 
getAsioErrorString(ASIOError result)3998 static const char *getAsioErrorString(ASIOError result)
3999 {
4000 	struct Messages
4001 	{
4002 		ASIOError value;
4003 		const char *message;
4004 	};
4005 
4006 	static const Messages m[] =
4007 		{
4008 			{ASE_NotPresent, "Hardware input or output is not present or available."},
4009 			{ASE_HWMalfunction, "Hardware is malfunctioning."},
4010 			{ASE_InvalidParameter, "Invalid input parameter."},
4011 			{ASE_InvalidMode, "Invalid mode."},
4012 			{ASE_SPNotAdvancing, "Sample position not advancing."},
4013 			{ASE_NoClock, "Sample clock or rate cannot be determined or is not present."},
4014 			{ASE_NoMemory, "Not enough memory to complete the request."}};
4015 
4016 	for (unsigned int i = 0; i < sizeof(m) / sizeof(m[0]); ++i)
4017 		if (m[i].value == result) return m[i].message;
4018 
4019 	return "Unknown error.";
4020 }
4021 
4022 //******************** End of __WINDOWS_ASIO__ *********************//
4023 #endif
4024 
4025 #if defined(__WINDOWS_WASAPI__)  // Windows WASAPI API
4026 
4027 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
4028 // - Introduces support for the Windows WASAPI API
4029 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
4030 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
4031 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
4032 
4033 #ifndef INITGUID
4034 #define INITGUID
4035 #endif
4036 #include <audioclient.h>
4037 #include <avrt.h>
4038 #include <mmdeviceapi.h>
4039 #include <functiondiscoverykeys_devpkey.h>
4040 
4041 //=============================================================================
4042 
4043 #define SAFE_RELEASE(objectPtr) \
4044 	if (objectPtr)              \
4045 	{                           \
4046 		objectPtr->Release();   \
4047 		objectPtr = NULL;       \
4048 	}
4049 
4050 typedef HANDLE(__stdcall *TAvSetMmThreadCharacteristicsPtr)(LPCWSTR TaskName, LPDWORD TaskIndex);
4051 
4052 //-----------------------------------------------------------------------------
4053 
4054 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
4055 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
4056 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
4057 // provide intermediate storage for read / write synchronization.
// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
// provide intermediate storage for read / write synchronization.
//
// Indices (inIndex_, outIndex_) are in *samples*, not bytes; the element
// size is fixed by the `format` passed to push/pull.  NOTE(review): no
// internal locking is visible here -- thread-safety presumably relies on a
// single producer and single consumer; confirm against the callers.
class WasapiBuffer
{
public:
	WasapiBuffer()
		: buffer_(NULL),
		  bufferSize_(0),
		  inIndex_(0),
		  outIndex_(0) {}

	~WasapiBuffer()
	{
		// free(NULL) is a no-op, so an unset buffer is safe here.
		free(buffer_);
	}

	// sets the length of the internal ring buffer
	// bufferSize is in samples; formatBytes is the per-sample byte size.
	// Any previously stored data is discarded and both indices reset.
	void setBufferSize(unsigned int bufferSize, unsigned int formatBytes)
	{
		free(buffer_);

		buffer_ = (char *)calloc(bufferSize, formatBytes);

		bufferSize_ = bufferSize;
		inIndex_ = 0;
		outIndex_ = 0;
	}

	// attempt to push a buffer into the ring buffer at the current "in" index
	// Returns false (without copying) if the input is empty/NULL, larger than
	// the ring, or there is not enough free space before the "out" index.
	bool pushBuffer(char *buffer, unsigned int bufferSize, RtAudioFormat format)
	{
		if (!buffer ||                 // incoming buffer is NULL
			bufferSize == 0 ||         // incoming buffer has no data
			bufferSize > bufferSize_)  // incoming buffer too large
		{
			return false;
		}

		// Unwrap the "out" index into the same linear coordinate space as
		// [inIndex_, inIndexEnd) so the overlap test below is a simple
		// interval comparison.
		unsigned int relOutIndex = outIndex_;
		unsigned int inIndexEnd = inIndex_ + bufferSize;
		if (relOutIndex < inIndex_ && inIndexEnd >= bufferSize_)
		{
			relOutIndex += bufferSize_;
		}

		// "in" index can end on the "out" index but cannot begin at it
		if (inIndex_ <= relOutIndex && inIndexEnd > relOutIndex)
		{
			return false;  // not enough space between "in" index and "out" index
		}

		// copy buffer from external to internal
		// fromZeroSize = samples that wrap past the end of the ring (copied
		// to the start); fromInSize = samples copied at the current index.
		int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
		fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
		int fromInSize = bufferSize - fromZeroSize;

		switch (format)
		{
			case RTAUDIO_SINT8:
				memcpy(&((char *)buffer_)[inIndex_], buffer, fromInSize * sizeof(char));
				memcpy(buffer_, &((char *)buffer)[fromInSize], fromZeroSize * sizeof(char));
				break;
			case RTAUDIO_SINT16:
				memcpy(&((short *)buffer_)[inIndex_], buffer, fromInSize * sizeof(short));
				memcpy(buffer_, &((short *)buffer)[fromInSize], fromZeroSize * sizeof(short));
				break;
			case RTAUDIO_SINT24:
				memcpy(&((S24 *)buffer_)[inIndex_], buffer, fromInSize * sizeof(S24));
				memcpy(buffer_, &((S24 *)buffer)[fromInSize], fromZeroSize * sizeof(S24));
				break;
			case RTAUDIO_SINT32:
				memcpy(&((int *)buffer_)[inIndex_], buffer, fromInSize * sizeof(int));
				memcpy(buffer_, &((int *)buffer)[fromInSize], fromZeroSize * sizeof(int));
				break;
			case RTAUDIO_FLOAT32:
				memcpy(&((float *)buffer_)[inIndex_], buffer, fromInSize * sizeof(float));
				memcpy(buffer_, &((float *)buffer)[fromInSize], fromZeroSize * sizeof(float));
				break;
			case RTAUDIO_FLOAT64:
				memcpy(&((double *)buffer_)[inIndex_], buffer, fromInSize * sizeof(double));
				memcpy(buffer_, &((double *)buffer)[fromInSize], fromZeroSize * sizeof(double));
				break;
		}

		// update "in" index
		inIndex_ += bufferSize;
		inIndex_ %= bufferSize_;

		return true;
	}

	// attempt to pull a buffer from the ring buffer from the current "out" index
	// Returns false (without copying) if the request is empty/NULL, larger
	// than the ring, or there is not enough stored data before the "in" index.
	bool pullBuffer(char *buffer, unsigned int bufferSize, RtAudioFormat format)
	{
		if (!buffer ||                 // incoming buffer is NULL
			bufferSize == 0 ||         // incoming buffer has no data
			bufferSize > bufferSize_)  // incoming buffer too large
		{
			return false;
		}

		// Unwrap the "in" index into the same linear coordinate space as
		// [outIndex_, outIndexEnd), mirroring the logic in pushBuffer().
		unsigned int relInIndex = inIndex_;
		unsigned int outIndexEnd = outIndex_ + bufferSize;
		if (relInIndex < outIndex_ && outIndexEnd >= bufferSize_)
		{
			relInIndex += bufferSize_;
		}

		// "out" index can begin at and end on the "in" index
		if (outIndex_ < relInIndex && outIndexEnd > relInIndex)
		{
			return false;  // not enough space between "out" index and "in" index
		}

		// copy buffer from internal to external
		// fromZeroSize = samples read after wrapping to the ring start;
		// fromOutSize = samples read at the current "out" index.
		int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
		fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
		int fromOutSize = bufferSize - fromZeroSize;

		switch (format)
		{
			case RTAUDIO_SINT8:
				memcpy(buffer, &((char *)buffer_)[outIndex_], fromOutSize * sizeof(char));
				memcpy(&((char *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(char));
				break;
			case RTAUDIO_SINT16:
				memcpy(buffer, &((short *)buffer_)[outIndex_], fromOutSize * sizeof(short));
				memcpy(&((short *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(short));
				break;
			case RTAUDIO_SINT24:
				memcpy(buffer, &((S24 *)buffer_)[outIndex_], fromOutSize * sizeof(S24));
				memcpy(&((S24 *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(S24));
				break;
			case RTAUDIO_SINT32:
				memcpy(buffer, &((int *)buffer_)[outIndex_], fromOutSize * sizeof(int));
				memcpy(&((int *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(int));
				break;
			case RTAUDIO_FLOAT32:
				memcpy(buffer, &((float *)buffer_)[outIndex_], fromOutSize * sizeof(float));
				memcpy(&((float *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(float));
				break;
			case RTAUDIO_FLOAT64:
				memcpy(buffer, &((double *)buffer_)[outIndex_], fromOutSize * sizeof(double));
				memcpy(&((double *)buffer)[fromOutSize], buffer_, fromZeroSize * sizeof(double));
				break;
		}

		// update "out" index
		outIndex_ += bufferSize;
		outIndex_ %= bufferSize_;

		return true;
	}

private:
	char *buffer_;             // ring storage (calloc'd in setBufferSize)
	unsigned int bufferSize_;  // capacity in samples
	unsigned int inIndex_;     // next write position (samples)
	unsigned int outIndex_;    // next read position (samples)
};
4216 
4217 //-----------------------------------------------------------------------------
4218 
4219 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4220 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
4221 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4222 // This sample rate converter favors speed over quality, and works best with conversions between
4223 // one rate and its multiple.
convertBufferWasapi(char * outBuffer,const char * inBuffer,const unsigned int & channelCount,const unsigned int & inSampleRate,const unsigned int & outSampleRate,const unsigned int & inSampleCount,unsigned int & outSampleCount,const RtAudioFormat & format)4224 void convertBufferWasapi(char *outBuffer,
4225 						 const char *inBuffer,
4226 						 const unsigned int &channelCount,
4227 						 const unsigned int &inSampleRate,
4228 						 const unsigned int &outSampleRate,
4229 						 const unsigned int &inSampleCount,
4230 						 unsigned int &outSampleCount,
4231 						 const RtAudioFormat &format)
4232 {
4233 	// calculate the new outSampleCount and relative sampleStep
4234 	float sampleRatio = (float)outSampleRate / inSampleRate;
4235 	float sampleStep = 1.0f / sampleRatio;
4236 	float inSampleFraction = 0.0f;
4237 
4238 	outSampleCount = (unsigned int)roundf(inSampleCount * sampleRatio);
4239 
4240 	// frame-by-frame, copy each relative input sample into it's corresponding output sample
4241 	for (unsigned int outSample = 0; outSample < outSampleCount; outSample++)
4242 	{
4243 		unsigned int inSample = (unsigned int)inSampleFraction;
4244 
4245 		switch (format)
4246 		{
4247 			case RTAUDIO_SINT8:
4248 				memcpy(&((char *)outBuffer)[outSample * channelCount], &((char *)inBuffer)[inSample * channelCount], channelCount * sizeof(char));
4249 				break;
4250 			case RTAUDIO_SINT16:
4251 				memcpy(&((short *)outBuffer)[outSample * channelCount], &((short *)inBuffer)[inSample * channelCount], channelCount * sizeof(short));
4252 				break;
4253 			case RTAUDIO_SINT24:
4254 				memcpy(&((S24 *)outBuffer)[outSample * channelCount], &((S24 *)inBuffer)[inSample * channelCount], channelCount * sizeof(S24));
4255 				break;
4256 			case RTAUDIO_SINT32:
4257 				memcpy(&((int *)outBuffer)[outSample * channelCount], &((int *)inBuffer)[inSample * channelCount], channelCount * sizeof(int));
4258 				break;
4259 			case RTAUDIO_FLOAT32:
4260 				memcpy(&((float *)outBuffer)[outSample * channelCount], &((float *)inBuffer)[inSample * channelCount], channelCount * sizeof(float));
4261 				break;
4262 			case RTAUDIO_FLOAT64:
4263 				memcpy(&((double *)outBuffer)[outSample * channelCount], &((double *)inBuffer)[inSample * channelCount], channelCount * sizeof(double));
4264 				break;
4265 		}
4266 
4267 		// jump to next in sample
4268 		inSampleFraction += sampleStep;
4269 	}
4270 }
4271 
4272 //-----------------------------------------------------------------------------
4273 
4274 // A structure to hold various information related to the WASAPI implementation.
4275 struct WasapiHandle
4276 {
4277 	IAudioClient *captureAudioClient;
4278 	IAudioClient *renderAudioClient;
4279 	IAudioCaptureClient *captureClient;
4280 	IAudioRenderClient *renderClient;
4281 	HANDLE captureEvent;
4282 	HANDLE renderEvent;
4283 
WasapiHandleWasapiHandle4284 	WasapiHandle()
4285 		: captureAudioClient(NULL),
4286 		  renderAudioClient(NULL),
4287 		  captureClient(NULL),
4288 		  renderClient(NULL),
4289 		  captureEvent(NULL),
4290 		  renderEvent(NULL) {}
4291 };
4292 
4293 //=============================================================================
4294 
// Constructor: initialize COM for the calling thread and create the
// MMDevice enumerator used by all subsequent device queries.  Raises a
// DRIVER_ERROR if the enumerator cannot be instantiated.
RtApiWasapi::RtApiWasapi()
	: coInitialized_(false), deviceEnumerator_(NULL)
{
	// WASAPI can run either apartment or multi-threaded
	HRESULT hr = CoInitialize(NULL);
	// Record success so the destructor balances this CoInitialize() with a
	// CoUninitialize() only when our call actually took effect.
	if (!FAILED(hr))
		coInitialized_ = true;

	// Instantiate device enumerator
	hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL,
						  CLSCTX_ALL, __uuidof(IMMDeviceEnumerator),
						  (void **)&deviceEnumerator_);

	if (FAILED(hr))
	{
		errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
		error(RtAudioError::DRIVER_ERROR);
	}
}
4314 
4315 //-----------------------------------------------------------------------------
4316 
~RtApiWasapi()4317 RtApiWasapi::~RtApiWasapi()
4318 {
4319 	if (stream_.state != STREAM_CLOSED)
4320 		closeStream();
4321 
4322 	SAFE_RELEASE(deviceEnumerator_);
4323 
4324 	// If this object previously called CoInitialize()
4325 	if (coInitialized_)
4326 		CoUninitialize();
4327 }
4328 
4329 //=============================================================================
4330 
getDeviceCount(void)4331 unsigned int RtApiWasapi::getDeviceCount(void)
4332 {
4333 	unsigned int captureDeviceCount = 0;
4334 	unsigned int renderDeviceCount = 0;
4335 
4336 	IMMDeviceCollection *captureDevices = NULL;
4337 	IMMDeviceCollection *renderDevices = NULL;
4338 
4339 	// Count capture devices
4340 	errorText_.clear();
4341 	HRESULT hr = deviceEnumerator_->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &captureDevices);
4342 	if (FAILED(hr))
4343 	{
4344 		errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4345 		goto Exit;
4346 	}
4347 
4348 	hr = captureDevices->GetCount(&captureDeviceCount);
4349 	if (FAILED(hr))
4350 	{
4351 		errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4352 		goto Exit;
4353 	}
4354 
4355 	// Count render devices
4356 	hr = deviceEnumerator_->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &renderDevices);
4357 	if (FAILED(hr))
4358 	{
4359 		errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4360 		goto Exit;
4361 	}
4362 
4363 	hr = renderDevices->GetCount(&renderDeviceCount);
4364 	if (FAILED(hr))
4365 	{
4366 		errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4367 		goto Exit;
4368 	}
4369 
4370 Exit:
4371 	// release all references
4372 	SAFE_RELEASE(captureDevices);
4373 	SAFE_RELEASE(renderDevices);
4374 
4375 	if (errorText_.empty())
4376 		return captureDeviceCount + renderDeviceCount;
4377 
4378 	error(RtAudioError::DRIVER_ERROR);
4379 	return 0;
4380 }
4381 
4382 //-----------------------------------------------------------------------------
4383 
getDeviceInfo(unsigned int device)4384 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo(unsigned int device)
4385 {
4386 	RtAudio::DeviceInfo info;
4387 	unsigned int captureDeviceCount = 0;
4388 	unsigned int renderDeviceCount = 0;
4389 	std::string defaultDeviceName;
4390 	bool isCaptureDevice = false;
4391 
4392 	PROPVARIANT deviceNameProp;
4393 	PROPVARIANT defaultDeviceNameProp;
4394 
4395 	IMMDeviceCollection *captureDevices = NULL;
4396 	IMMDeviceCollection *renderDevices = NULL;
4397 	IMMDevice *devicePtr = NULL;
4398 	IMMDevice *defaultDevicePtr = NULL;
4399 	IAudioClient *audioClient = NULL;
4400 	IPropertyStore *devicePropStore = NULL;
4401 	IPropertyStore *defaultDevicePropStore = NULL;
4402 
4403 	WAVEFORMATEX *deviceFormat = NULL;
4404 	WAVEFORMATEX *closestMatchFormat = NULL;
4405 
4406 	// probed
4407 	info.probed = false;
4408 
4409 	// Count capture devices
4410 	errorText_.clear();
4411 	RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4412 	HRESULT hr = deviceEnumerator_->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &captureDevices);
4413 	if (FAILED(hr))
4414 	{
4415 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4416 		goto Exit;
4417 	}
4418 
4419 	hr = captureDevices->GetCount(&captureDeviceCount);
4420 	if (FAILED(hr))
4421 	{
4422 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4423 		goto Exit;
4424 	}
4425 
4426 	// Count render devices
4427 	hr = deviceEnumerator_->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &renderDevices);
4428 	if (FAILED(hr))
4429 	{
4430 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4431 		goto Exit;
4432 	}
4433 
4434 	hr = renderDevices->GetCount(&renderDeviceCount);
4435 	if (FAILED(hr))
4436 	{
4437 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4438 		goto Exit;
4439 	}
4440 
4441 	// validate device index
4442 	if (device >= captureDeviceCount + renderDeviceCount)
4443 	{
4444 		errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4445 		errorType = RtAudioError::INVALID_USE;
4446 		goto Exit;
4447 	}
4448 
4449 	// determine whether index falls within capture or render devices
4450 	if (device >= renderDeviceCount)
4451 	{
4452 		hr = captureDevices->Item(device - renderDeviceCount, &devicePtr);
4453 		if (FAILED(hr))
4454 		{
4455 			errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4456 			goto Exit;
4457 		}
4458 		isCaptureDevice = true;
4459 	}
4460 	else
4461 	{
4462 		hr = renderDevices->Item(device, &devicePtr);
4463 		if (FAILED(hr))
4464 		{
4465 			errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4466 			goto Exit;
4467 		}
4468 		isCaptureDevice = false;
4469 	}
4470 
4471 	// get default device name
4472 	if (isCaptureDevice)
4473 	{
4474 		hr = deviceEnumerator_->GetDefaultAudioEndpoint(eCapture, eConsole, &defaultDevicePtr);
4475 		if (FAILED(hr))
4476 		{
4477 			errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4478 			goto Exit;
4479 		}
4480 	}
4481 	else
4482 	{
4483 		hr = deviceEnumerator_->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevicePtr);
4484 		if (FAILED(hr))
4485 		{
4486 			errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4487 			goto Exit;
4488 		}
4489 	}
4490 
4491 	hr = defaultDevicePtr->OpenPropertyStore(STGM_READ, &defaultDevicePropStore);
4492 	if (FAILED(hr))
4493 	{
4494 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4495 		goto Exit;
4496 	}
4497 	PropVariantInit(&defaultDeviceNameProp);
4498 
4499 	hr = defaultDevicePropStore->GetValue(PKEY_Device_FriendlyName, &defaultDeviceNameProp);
4500 	if (FAILED(hr))
4501 	{
4502 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4503 		goto Exit;
4504 	}
4505 
4506 	defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4507 
4508 	// name
4509 	hr = devicePtr->OpenPropertyStore(STGM_READ, &devicePropStore);
4510 	if (FAILED(hr))
4511 	{
4512 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4513 		goto Exit;
4514 	}
4515 
4516 	PropVariantInit(&deviceNameProp);
4517 
4518 	hr = devicePropStore->GetValue(PKEY_Device_FriendlyName, &deviceNameProp);
4519 	if (FAILED(hr))
4520 	{
4521 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4522 		goto Exit;
4523 	}
4524 
4525 	info.name = convertCharPointerToStdString(deviceNameProp.pwszVal);
4526 
4527 	// is default
4528 	if (isCaptureDevice)
4529 	{
4530 		info.isDefaultInput = info.name == defaultDeviceName;
4531 		info.isDefaultOutput = false;
4532 	}
4533 	else
4534 	{
4535 		info.isDefaultInput = false;
4536 		info.isDefaultOutput = info.name == defaultDeviceName;
4537 	}
4538 
4539 	// channel count
4540 	hr = devicePtr->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void **)&audioClient);
4541 	if (FAILED(hr))
4542 	{
4543 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4544 		goto Exit;
4545 	}
4546 
4547 	hr = audioClient->GetMixFormat(&deviceFormat);
4548 	if (FAILED(hr))
4549 	{
4550 		errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4551 		goto Exit;
4552 	}
4553 
4554 	if (isCaptureDevice)
4555 	{
4556 		info.inputChannels = deviceFormat->nChannels;
4557 		info.outputChannels = 0;
4558 		info.duplexChannels = 0;
4559 	}
4560 	else
4561 	{
4562 		info.inputChannels = 0;
4563 		info.outputChannels = deviceFormat->nChannels;
4564 		info.duplexChannels = 0;
4565 	}
4566 
4567 	// sample rates
4568 	info.sampleRates.clear();
4569 
4570 	// allow support for all sample rates as we have a built-in sample rate converter
4571 	for (unsigned int i = 0; i < MAX_SAMPLE_RATES; i++)
4572 	{
4573 		info.sampleRates.push_back(SAMPLE_RATES[i]);
4574 	}
4575 	info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4576 
4577 	// native format
4578 	info.nativeFormats = 0;
4579 
4580 	if (deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4581 		(deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4582 		 ((WAVEFORMATEXTENSIBLE *)deviceFormat)->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
4583 	{
4584 		if (deviceFormat->wBitsPerSample == 32)
4585 		{
4586 			info.nativeFormats |= RTAUDIO_FLOAT32;
4587 		}
4588 		else if (deviceFormat->wBitsPerSample == 64)
4589 		{
4590 			info.nativeFormats |= RTAUDIO_FLOAT64;
4591 		}
4592 	}
4593 	else if (deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4594 			 (deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4595 			  ((WAVEFORMATEXTENSIBLE *)deviceFormat)->SubFormat == KSDATAFORMAT_SUBTYPE_PCM))
4596 	{
4597 		if (deviceFormat->wBitsPerSample == 8)
4598 		{
4599 			info.nativeFormats |= RTAUDIO_SINT8;
4600 		}
4601 		else if (deviceFormat->wBitsPerSample == 16)
4602 		{
4603 			info.nativeFormats |= RTAUDIO_SINT16;
4604 		}
4605 		else if (deviceFormat->wBitsPerSample == 24)
4606 		{
4607 			info.nativeFormats |= RTAUDIO_SINT24;
4608 		}
4609 		else if (deviceFormat->wBitsPerSample == 32)
4610 		{
4611 			info.nativeFormats |= RTAUDIO_SINT32;
4612 		}
4613 	}
4614 
4615 	// probed
4616 	info.probed = true;
4617 
4618 Exit:
4619 	// release all references
4620 	PropVariantClear(&deviceNameProp);
4621 	PropVariantClear(&defaultDeviceNameProp);
4622 
4623 	SAFE_RELEASE(captureDevices);
4624 	SAFE_RELEASE(renderDevices);
4625 	SAFE_RELEASE(devicePtr);
4626 	SAFE_RELEASE(defaultDevicePtr);
4627 	SAFE_RELEASE(audioClient);
4628 	SAFE_RELEASE(devicePropStore);
4629 	SAFE_RELEASE(defaultDevicePropStore);
4630 
4631 	CoTaskMemFree(deviceFormat);
4632 	CoTaskMemFree(closestMatchFormat);
4633 
4634 	if (!errorText_.empty())
4635 		error(errorType);
4636 	return info;
4637 }
4638 
4639 //-----------------------------------------------------------------------------
4640 
getDefaultOutputDevice(void)4641 unsigned int RtApiWasapi::getDefaultOutputDevice(void)
4642 {
4643 	for (unsigned int i = 0; i < getDeviceCount(); i++)
4644 	{
4645 		if (getDeviceInfo(i).isDefaultOutput)
4646 		{
4647 			return i;
4648 		}
4649 	}
4650 
4651 	return 0;
4652 }
4653 
4654 //-----------------------------------------------------------------------------
4655 
getDefaultInputDevice(void)4656 unsigned int RtApiWasapi::getDefaultInputDevice(void)
4657 {
4658 	for (unsigned int i = 0; i < getDeviceCount(); i++)
4659 	{
4660 		if (getDeviceInfo(i).isDefaultInput)
4661 		{
4662 			return i;
4663 		}
4664 	}
4665 
4666 	return 0;
4667 }
4668 
4669 //-----------------------------------------------------------------------------
4670 
closeStream(void)4671 void RtApiWasapi::closeStream(void)
4672 {
4673 	if (stream_.state == STREAM_CLOSED)
4674 	{
4675 		errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4676 		error(RtAudioError::WARNING);
4677 		return;
4678 	}
4679 
4680 	if (stream_.state != STREAM_STOPPED)
4681 		stopStream();
4682 
4683 	// clean up stream memory
4684 	SAFE_RELEASE(((WasapiHandle *)stream_.apiHandle)->captureAudioClient)
4685 	SAFE_RELEASE(((WasapiHandle *)stream_.apiHandle)->renderAudioClient)
4686 
4687 	SAFE_RELEASE(((WasapiHandle *)stream_.apiHandle)->captureClient)
4688 	SAFE_RELEASE(((WasapiHandle *)stream_.apiHandle)->renderClient)
4689 
4690 	if (((WasapiHandle *)stream_.apiHandle)->captureEvent)
4691 		CloseHandle(((WasapiHandle *)stream_.apiHandle)->captureEvent);
4692 
4693 	if (((WasapiHandle *)stream_.apiHandle)->renderEvent)
4694 		CloseHandle(((WasapiHandle *)stream_.apiHandle)->renderEvent);
4695 
4696 	delete (WasapiHandle *)stream_.apiHandle;
4697 	stream_.apiHandle = NULL;
4698 
4699 	for (int i = 0; i < 2; i++)
4700 	{
4701 		if (stream_.userBuffer[i])
4702 		{
4703 			free(stream_.userBuffer[i]);
4704 			stream_.userBuffer[i] = 0;
4705 		}
4706 	}
4707 
4708 	if (stream_.deviceBuffer)
4709 	{
4710 		free(stream_.deviceBuffer);
4711 		stream_.deviceBuffer = 0;
4712 	}
4713 
4714 	// update stream state
4715 	stream_.state = STREAM_CLOSED;
4716 }
4717 
4718 //-----------------------------------------------------------------------------
4719 
startStream(void)4720 void RtApiWasapi::startStream(void)
4721 {
4722 	verifyStream();
4723 
4724 	if (stream_.state == STREAM_RUNNING)
4725 	{
4726 		errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4727 		error(RtAudioError::WARNING);
4728 		return;
4729 	}
4730 
4731 	// update stream state
4732 	stream_.state = STREAM_RUNNING;
4733 
4734 	// create WASAPI stream thread
4735 	stream_.callbackInfo.thread = (ThreadHandle)CreateThread(NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL);
4736 
4737 	if (!stream_.callbackInfo.thread)
4738 	{
4739 		errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4740 		error(RtAudioError::THREAD_ERROR);
4741 	}
4742 	else
4743 	{
4744 		SetThreadPriority((void *)stream_.callbackInfo.thread, stream_.callbackInfo.priority);
4745 		ResumeThread((void *)stream_.callbackInfo.thread);
4746 	}
4747 }
4748 
4749 //-----------------------------------------------------------------------------
4750 
stopStream(void)4751 void RtApiWasapi::stopStream(void)
4752 {
4753 	verifyStream();
4754 
4755 	if (stream_.state == STREAM_STOPPED)
4756 	{
4757 		errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4758 		error(RtAudioError::WARNING);
4759 		return;
4760 	}
4761 
4762 	// inform stream thread by setting stream state to STREAM_STOPPING
4763 	stream_.state = STREAM_STOPPING;
4764 
4765 	// wait until stream thread is stopped
4766 	while (stream_.state != STREAM_STOPPED)
4767 	{
4768 		Sleep(1);
4769 	}
4770 
4771 	// Wait for the last buffer to play before stopping.
4772 	Sleep(1000 * stream_.bufferSize / stream_.sampleRate);
4773 
4774 	// stop capture client if applicable
4775 	if (((WasapiHandle *)stream_.apiHandle)->captureAudioClient)
4776 	{
4777 		HRESULT hr = ((WasapiHandle *)stream_.apiHandle)->captureAudioClient->Stop();
4778 		if (FAILED(hr))
4779 		{
4780 			errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4781 			error(RtAudioError::DRIVER_ERROR);
4782 			return;
4783 		}
4784 	}
4785 
4786 	// stop render client if applicable
4787 	if (((WasapiHandle *)stream_.apiHandle)->renderAudioClient)
4788 	{
4789 		HRESULT hr = ((WasapiHandle *)stream_.apiHandle)->renderAudioClient->Stop();
4790 		if (FAILED(hr))
4791 		{
4792 			errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4793 			error(RtAudioError::DRIVER_ERROR);
4794 			return;
4795 		}
4796 	}
4797 
4798 	// close thread handle
4799 	if (stream_.callbackInfo.thread && !CloseHandle((void *)stream_.callbackInfo.thread))
4800 	{
4801 		errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4802 		error(RtAudioError::THREAD_ERROR);
4803 		return;
4804 	}
4805 
4806 	stream_.callbackInfo.thread = (ThreadHandle)NULL;
4807 }
4808 
4809 //-----------------------------------------------------------------------------
4810 
abortStream(void)4811 void RtApiWasapi::abortStream(void)
4812 {
4813 	verifyStream();
4814 
4815 	if (stream_.state == STREAM_STOPPED)
4816 	{
4817 		errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4818 		error(RtAudioError::WARNING);
4819 		return;
4820 	}
4821 
4822 	// inform stream thread by setting stream state to STREAM_STOPPING
4823 	stream_.state = STREAM_STOPPING;
4824 
4825 	// wait until stream thread is stopped
4826 	while (stream_.state != STREAM_STOPPED)
4827 	{
4828 		Sleep(1);
4829 	}
4830 
4831 	// stop capture client if applicable
4832 	if (((WasapiHandle *)stream_.apiHandle)->captureAudioClient)
4833 	{
4834 		HRESULT hr = ((WasapiHandle *)stream_.apiHandle)->captureAudioClient->Stop();
4835 		if (FAILED(hr))
4836 		{
4837 			errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4838 			error(RtAudioError::DRIVER_ERROR);
4839 			return;
4840 		}
4841 	}
4842 
4843 	// stop render client if applicable
4844 	if (((WasapiHandle *)stream_.apiHandle)->renderAudioClient)
4845 	{
4846 		HRESULT hr = ((WasapiHandle *)stream_.apiHandle)->renderAudioClient->Stop();
4847 		if (FAILED(hr))
4848 		{
4849 			errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4850 			error(RtAudioError::DRIVER_ERROR);
4851 			return;
4852 		}
4853 	}
4854 
4855 	// close thread handle
4856 	if (stream_.callbackInfo.thread && !CloseHandle((void *)stream_.callbackInfo.thread))
4857 	{
4858 		errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4859 		error(RtAudioError::THREAD_ERROR);
4860 		return;
4861 	}
4862 
4863 	stream_.callbackInfo.thread = (ThreadHandle)NULL;
4864 }
4865 
4866 //-----------------------------------------------------------------------------
4867 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)4868 bool RtApiWasapi::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
4869 								  unsigned int firstChannel, unsigned int sampleRate,
4870 								  RtAudioFormat format, unsigned int *bufferSize,
4871 								  RtAudio::StreamOptions *options)
4872 {
4873 	bool methodResult = FAILURE;
4874 	unsigned int captureDeviceCount = 0;
4875 	unsigned int renderDeviceCount = 0;
4876 
4877 	IMMDeviceCollection *captureDevices = NULL;
4878 	IMMDeviceCollection *renderDevices = NULL;
4879 	IMMDevice *devicePtr = NULL;
4880 	WAVEFORMATEX *deviceFormat = NULL;
4881 	unsigned int bufferBytes;
4882 	stream_.state = STREAM_STOPPED;
4883 
4884 	// create API Handle if not already created
4885 	if (!stream_.apiHandle)
4886 		stream_.apiHandle = (void *)new WasapiHandle();
4887 
4888 	// Count capture devices
4889 	errorText_.clear();
4890 	RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4891 	HRESULT hr = deviceEnumerator_->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &captureDevices);
4892 	if (FAILED(hr))
4893 	{
4894 		errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4895 		goto Exit;
4896 	}
4897 
4898 	hr = captureDevices->GetCount(&captureDeviceCount);
4899 	if (FAILED(hr))
4900 	{
4901 		errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4902 		goto Exit;
4903 	}
4904 
4905 	// Count render devices
4906 	hr = deviceEnumerator_->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &renderDevices);
4907 	if (FAILED(hr))
4908 	{
4909 		errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4910 		goto Exit;
4911 	}
4912 
4913 	hr = renderDevices->GetCount(&renderDeviceCount);
4914 	if (FAILED(hr))
4915 	{
4916 		errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4917 		goto Exit;
4918 	}
4919 
4920 	// validate device index
4921 	if (device >= captureDeviceCount + renderDeviceCount)
4922 	{
4923 		errorType = RtAudioError::INVALID_USE;
4924 		errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4925 		goto Exit;
4926 	}
4927 
4928 	// determine whether index falls within capture or render devices
4929 	if (device >= renderDeviceCount)
4930 	{
4931 		if (mode != INPUT)
4932 		{
4933 			errorType = RtAudioError::INVALID_USE;
4934 			errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4935 			goto Exit;
4936 		}
4937 
4938 		// retrieve captureAudioClient from devicePtr
4939 		IAudioClient *&captureAudioClient = ((WasapiHandle *)stream_.apiHandle)->captureAudioClient;
4940 
4941 		hr = captureDevices->Item(device - renderDeviceCount, &devicePtr);
4942 		if (FAILED(hr))
4943 		{
4944 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4945 			goto Exit;
4946 		}
4947 
4948 		hr = devicePtr->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
4949 								 NULL, (void **)&captureAudioClient);
4950 		if (FAILED(hr))
4951 		{
4952 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4953 			goto Exit;
4954 		}
4955 
4956 		hr = captureAudioClient->GetMixFormat(&deviceFormat);
4957 		if (FAILED(hr))
4958 		{
4959 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4960 			goto Exit;
4961 		}
4962 
4963 		stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4964 		captureAudioClient->GetStreamLatency((long long *)&stream_.latency[mode]);
4965 	}
4966 	else
4967 	{
4968 		if (mode != OUTPUT)
4969 		{
4970 			errorType = RtAudioError::INVALID_USE;
4971 			errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4972 			goto Exit;
4973 		}
4974 
4975 		// retrieve renderAudioClient from devicePtr
4976 		IAudioClient *&renderAudioClient = ((WasapiHandle *)stream_.apiHandle)->renderAudioClient;
4977 
4978 		hr = renderDevices->Item(device, &devicePtr);
4979 		if (FAILED(hr))
4980 		{
4981 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4982 			goto Exit;
4983 		}
4984 
4985 		hr = devicePtr->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
4986 								 NULL, (void **)&renderAudioClient);
4987 		if (FAILED(hr))
4988 		{
4989 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4990 			goto Exit;
4991 		}
4992 
4993 		hr = renderAudioClient->GetMixFormat(&deviceFormat);
4994 		if (FAILED(hr))
4995 		{
4996 			errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4997 			goto Exit;
4998 		}
4999 
5000 		stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
5001 		renderAudioClient->GetStreamLatency((long long *)&stream_.latency[mode]);
5002 	}
5003 
5004 	// fill stream data
5005 	if ((stream_.mode == OUTPUT && mode == INPUT) ||
5006 		(stream_.mode == INPUT && mode == OUTPUT))
5007 	{
5008 		stream_.mode = DUPLEX;
5009 	}
5010 	else
5011 	{
5012 		stream_.mode = mode;
5013 	}
5014 
5015 	stream_.device[mode] = device;
5016 	stream_.doByteSwap[mode] = false;
5017 	stream_.sampleRate = sampleRate;
5018 	stream_.bufferSize = *bufferSize;
5019 	stream_.nBuffers = 1;
5020 	stream_.nUserChannels[mode] = channels;
5021 	stream_.channelOffset[mode] = firstChannel;
5022 	stream_.userFormat = format;
5023 	stream_.deviceFormat[mode] = getDeviceInfo(device).nativeFormats;
5024 
5025 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
5026 		stream_.userInterleaved = false;
5027 	else
5028 		stream_.userInterleaved = true;
5029 	stream_.deviceInterleaved[mode] = true;
5030 
5031 	// Set flags for buffer conversion.
5032 	stream_.doConvertBuffer[mode] = false;
5033 	if (stream_.userFormat != stream_.deviceFormat[mode] ||
5034 		stream_.nUserChannels != stream_.nDeviceChannels)
5035 		stream_.doConvertBuffer[mode] = true;
5036 	else if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5037 			 stream_.nUserChannels[mode] > 1)
5038 		stream_.doConvertBuffer[mode] = true;
5039 
5040 	if (stream_.doConvertBuffer[mode])
5041 		setConvertInfo(mode, 0);
5042 
5043 	// Allocate necessary internal buffers
5044 	bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes(stream_.userFormat);
5045 
5046 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
5047 	if (!stream_.userBuffer[mode])
5048 	{
5049 		errorType = RtAudioError::MEMORY_ERROR;
5050 		errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
5051 		goto Exit;
5052 	}
5053 
5054 	if (options && options->flags & RTAUDIO_SCHEDULE_REALTIME)
5055 		stream_.callbackInfo.priority = 15;
5056 	else
5057 		stream_.callbackInfo.priority = 0;
5058 
5059 	///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
5060 	///! TODO: RTAUDIO_HOG_DEVICE       // Exclusive mode
5061 
5062 	methodResult = SUCCESS;
5063 
5064 Exit:
5065 	//clean up
5066 	SAFE_RELEASE(captureDevices);
5067 	SAFE_RELEASE(renderDevices);
5068 	SAFE_RELEASE(devicePtr);
5069 	CoTaskMemFree(deviceFormat);
5070 
5071 	// if method failed, close the stream
5072 	if (methodResult == FAILURE)
5073 		closeStream();
5074 
5075 	if (!errorText_.empty())
5076 		error(errorType);
5077 	return methodResult;
5078 }
5079 
5080 //=============================================================================
5081 
runWasapiThread(void * wasapiPtr)5082 DWORD WINAPI RtApiWasapi::runWasapiThread(void *wasapiPtr)
5083 {
5084 	if (wasapiPtr)
5085 		((RtApiWasapi *)wasapiPtr)->wasapiThread();
5086 
5087 	return 0;
5088 }
5089 
stopWasapiThread(void * wasapiPtr)5090 DWORD WINAPI RtApiWasapi::stopWasapiThread(void *wasapiPtr)
5091 {
5092 	if (wasapiPtr)
5093 		((RtApiWasapi *)wasapiPtr)->stopStream();
5094 
5095 	return 0;
5096 }
5097 
abortWasapiThread(void * wasapiPtr)5098 DWORD WINAPI RtApiWasapi::abortWasapiThread(void *wasapiPtr)
5099 {
5100 	if (wasapiPtr)
5101 		((RtApiWasapi *)wasapiPtr)->abortStream();
5102 
5103 	return 0;
5104 }
5105 
5106 //-----------------------------------------------------------------------------
5107 
wasapiThread()5108 void RtApiWasapi::wasapiThread()
5109 {
5110 	// as this is a new thread, we must CoInitialize it
5111 	CoInitialize(NULL);
5112 
5113 	HRESULT hr;
5114 
5115 	IAudioClient *captureAudioClient = ((WasapiHandle *)stream_.apiHandle)->captureAudioClient;
5116 	IAudioClient *renderAudioClient = ((WasapiHandle *)stream_.apiHandle)->renderAudioClient;
5117 	IAudioCaptureClient *captureClient = ((WasapiHandle *)stream_.apiHandle)->captureClient;
5118 	IAudioRenderClient *renderClient = ((WasapiHandle *)stream_.apiHandle)->renderClient;
5119 	HANDLE captureEvent = ((WasapiHandle *)stream_.apiHandle)->captureEvent;
5120 	HANDLE renderEvent = ((WasapiHandle *)stream_.apiHandle)->renderEvent;
5121 
5122 	WAVEFORMATEX *captureFormat = NULL;
5123 	WAVEFORMATEX *renderFormat = NULL;
5124 	float captureSrRatio = 0.0f;
5125 	float renderSrRatio = 0.0f;
5126 	WasapiBuffer captureBuffer;
5127 	WasapiBuffer renderBuffer;
5128 
5129 	// declare local stream variables
5130 	RtAudioCallback callback = (RtAudioCallback)stream_.callbackInfo.callback;
5131 	BYTE *streamBuffer = NULL;
5132 	unsigned long captureFlags = 0;
5133 	unsigned int bufferFrameCount = 0;
5134 	unsigned int numFramesPadding = 0;
5135 	unsigned int convBufferSize = 0;
5136 	bool callbackPushed = false;
5137 	bool callbackPulled = false;
5138 	bool callbackStopped = false;
5139 	int callbackResult = 0;
5140 
5141 	// convBuffer is used to store converted buffers between WASAPI and the user
5142 	char *convBuffer = NULL;
5143 	unsigned int convBuffSize = 0;
5144 	unsigned int deviceBuffSize = 0;
5145 
5146 	errorText_.clear();
5147 	RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5148 
5149 	// Attempt to assign "Pro Audio" characteristic to thread
5150 	HMODULE AvrtDll = LoadLibrary((LPCTSTR) "AVRT.dll");
5151 	if (AvrtDll)
5152 	{
5153 		DWORD taskIndex = 0;
5154 		TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = (TAvSetMmThreadCharacteristicsPtr)GetProcAddress(AvrtDll, "AvSetMmThreadCharacteristicsW");
5155 		AvSetMmThreadCharacteristicsPtr(L"Pro Audio", &taskIndex);
5156 		FreeLibrary(AvrtDll);
5157 	}
5158 
5159 	// start capture stream if applicable
5160 	if (captureAudioClient)
5161 	{
5162 		hr = captureAudioClient->GetMixFormat(&captureFormat);
5163 		if (FAILED(hr))
5164 		{
5165 			errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5166 			goto Exit;
5167 		}
5168 
5169 		captureSrRatio = ((float)captureFormat->nSamplesPerSec / stream_.sampleRate);
5170 
5171 		// initialize capture stream according to desire buffer size
5172 		float desiredBufferSize = stream_.bufferSize * captureSrRatio;
5173 		REFERENCE_TIME desiredBufferPeriod = (REFERENCE_TIME)((float)desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec);
5174 
5175 		if (!captureClient)
5176 		{
5177 			hr = captureAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED,
5178 												AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5179 												desiredBufferPeriod,
5180 												desiredBufferPeriod,
5181 												captureFormat,
5182 												NULL);
5183 			if (FAILED(hr))
5184 			{
5185 				errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5186 				goto Exit;
5187 			}
5188 
5189 			hr = captureAudioClient->GetService(__uuidof(IAudioCaptureClient),
5190 												(void **)&captureClient);
5191 			if (FAILED(hr))
5192 			{
5193 				errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5194 				goto Exit;
5195 			}
5196 
5197 			// configure captureEvent to trigger on every available capture buffer
5198 			captureEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
5199 			if (!captureEvent)
5200 			{
5201 				errorType = RtAudioError::SYSTEM_ERROR;
5202 				errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5203 				goto Exit;
5204 			}
5205 
5206 			hr = captureAudioClient->SetEventHandle(captureEvent);
5207 			if (FAILED(hr))
5208 			{
5209 				errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5210 				goto Exit;
5211 			}
5212 
5213 			((WasapiHandle *)stream_.apiHandle)->captureClient = captureClient;
5214 			((WasapiHandle *)stream_.apiHandle)->captureEvent = captureEvent;
5215 		}
5216 
5217 		unsigned int inBufferSize = 0;
5218 		hr = captureAudioClient->GetBufferSize(&inBufferSize);
5219 		if (FAILED(hr))
5220 		{
5221 			errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5222 			goto Exit;
5223 		}
5224 
5225 		// scale outBufferSize according to stream->user sample rate ratio
5226 		unsigned int outBufferSize = (unsigned int)(stream_.bufferSize * captureSrRatio) * stream_.nDeviceChannels[INPUT];
5227 		inBufferSize *= stream_.nDeviceChannels[INPUT];
5228 
5229 		// set captureBuffer size
5230 		captureBuffer.setBufferSize(inBufferSize + outBufferSize, formatBytes(stream_.deviceFormat[INPUT]));
5231 
5232 		// reset the capture stream
5233 		hr = captureAudioClient->Reset();
5234 		if (FAILED(hr))
5235 		{
5236 			errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5237 			goto Exit;
5238 		}
5239 
5240 		// start the capture stream
5241 		hr = captureAudioClient->Start();
5242 		if (FAILED(hr))
5243 		{
5244 			errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5245 			goto Exit;
5246 		}
5247 	}
5248 
5249 	// start render stream if applicable
5250 	if (renderAudioClient)
5251 	{
5252 		hr = renderAudioClient->GetMixFormat(&renderFormat);
5253 		if (FAILED(hr))
5254 		{
5255 			errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5256 			goto Exit;
5257 		}
5258 
5259 		renderSrRatio = ((float)renderFormat->nSamplesPerSec / stream_.sampleRate);
5260 
5261 		// initialize render stream according to desire buffer size
5262 		float desiredBufferSize = stream_.bufferSize * renderSrRatio;
5263 		REFERENCE_TIME desiredBufferPeriod = (REFERENCE_TIME)((float)desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec);
5264 
5265 		if (!renderClient)
5266 		{
5267 			hr = renderAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED,
5268 											   AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5269 											   desiredBufferPeriod,
5270 											   desiredBufferPeriod,
5271 											   renderFormat,
5272 											   NULL);
5273 			if (FAILED(hr))
5274 			{
5275 				errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5276 				goto Exit;
5277 			}
5278 
5279 			hr = renderAudioClient->GetService(__uuidof(IAudioRenderClient),
5280 											   (void **)&renderClient);
5281 			if (FAILED(hr))
5282 			{
5283 				errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5284 				goto Exit;
5285 			}
5286 
5287 			// configure renderEvent to trigger on every available render buffer
5288 			renderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
5289 			if (!renderEvent)
5290 			{
5291 				errorType = RtAudioError::SYSTEM_ERROR;
5292 				errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5293 				goto Exit;
5294 			}
5295 
5296 			hr = renderAudioClient->SetEventHandle(renderEvent);
5297 			if (FAILED(hr))
5298 			{
5299 				errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5300 				goto Exit;
5301 			}
5302 
5303 			((WasapiHandle *)stream_.apiHandle)->renderClient = renderClient;
5304 			((WasapiHandle *)stream_.apiHandle)->renderEvent = renderEvent;
5305 		}
5306 
5307 		unsigned int outBufferSize = 0;
5308 		hr = renderAudioClient->GetBufferSize(&outBufferSize);
5309 		if (FAILED(hr))
5310 		{
5311 			errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5312 			goto Exit;
5313 		}
5314 
5315 		// scale inBufferSize according to user->stream sample rate ratio
5316 		unsigned int inBufferSize = (unsigned int)(stream_.bufferSize * renderSrRatio) * stream_.nDeviceChannels[OUTPUT];
5317 		outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5318 
5319 		// set renderBuffer size
5320 		renderBuffer.setBufferSize(inBufferSize + outBufferSize, formatBytes(stream_.deviceFormat[OUTPUT]));
5321 
5322 		// reset the render stream
5323 		hr = renderAudioClient->Reset();
5324 		if (FAILED(hr))
5325 		{
5326 			errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5327 			goto Exit;
5328 		}
5329 
5330 		// start the render stream
5331 		hr = renderAudioClient->Start();
5332 		if (FAILED(hr))
5333 		{
5334 			errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5335 			goto Exit;
5336 		}
5337 	}
5338 
5339 	if (stream_.mode == INPUT)
5340 	{
5341 		convBuffSize = (size_t)(stream_.bufferSize * captureSrRatio) * stream_.nDeviceChannels[INPUT] * formatBytes(stream_.deviceFormat[INPUT]);
5342 		deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes(stream_.deviceFormat[INPUT]);
5343 	}
5344 	else if (stream_.mode == OUTPUT)
5345 	{
5346 		convBuffSize = (size_t)(stream_.bufferSize * renderSrRatio) * stream_.nDeviceChannels[OUTPUT] * formatBytes(stream_.deviceFormat[OUTPUT]);
5347 		deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes(stream_.deviceFormat[OUTPUT]);
5348 	}
5349 	else if (stream_.mode == DUPLEX)
5350 	{
5351 		convBuffSize = std::max((size_t)(stream_.bufferSize * captureSrRatio) * stream_.nDeviceChannels[INPUT] * formatBytes(stream_.deviceFormat[INPUT]),
5352 								(size_t)(stream_.bufferSize * renderSrRatio) * stream_.nDeviceChannels[OUTPUT] * formatBytes(stream_.deviceFormat[OUTPUT]));
5353 		deviceBuffSize = std::max(stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes(stream_.deviceFormat[INPUT]),
5354 								  stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes(stream_.deviceFormat[OUTPUT]));
5355 	}
5356 
5357 	convBuffer = (char *)malloc(convBuffSize);
5358 	stream_.deviceBuffer = (char *)malloc(deviceBuffSize);
5359 	if (!convBuffer || !stream_.deviceBuffer)
5360 	{
5361 		errorType = RtAudioError::MEMORY_ERROR;
5362 		errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5363 		goto Exit;
5364 	}
5365 
5366 	// stream process loop
5367 	while (stream_.state != STREAM_STOPPING)
5368 	{
5369 		if (!callbackPulled)
5370 		{
5371 			// Callback Input
5372 			// ==============
5373 			// 1. Pull callback buffer from inputBuffer
5374 			// 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5375 			//                          Convert callback buffer to user format
5376 
5377 			if (captureAudioClient)
5378 			{
5379 				// Pull callback buffer from inputBuffer
5380 				callbackPulled = captureBuffer.pullBuffer(convBuffer,
5381 														  (unsigned int)(stream_.bufferSize * captureSrRatio) * stream_.nDeviceChannels[INPUT],
5382 														  stream_.deviceFormat[INPUT]);
5383 
5384 				if (callbackPulled)
5385 				{
5386 					// Convert callback buffer to user sample rate
5387 					convertBufferWasapi(stream_.deviceBuffer,
5388 										convBuffer,
5389 										stream_.nDeviceChannels[INPUT],
5390 										captureFormat->nSamplesPerSec,
5391 										stream_.sampleRate,
5392 										(unsigned int)(stream_.bufferSize * captureSrRatio),
5393 										convBufferSize,
5394 										stream_.deviceFormat[INPUT]);
5395 
5396 					if (stream_.doConvertBuffer[INPUT])
5397 					{
5398 						// Convert callback buffer to user format
5399 						convertBuffer(stream_.userBuffer[INPUT],
5400 									  stream_.deviceBuffer,
5401 									  stream_.convertInfo[INPUT]);
5402 					}
5403 					else
5404 					{
5405 						// no further conversion, simple copy deviceBuffer to userBuffer
5406 						memcpy(stream_.userBuffer[INPUT],
5407 							   stream_.deviceBuffer,
5408 							   stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes(stream_.userFormat));
5409 					}
5410 				}
5411 			}
5412 			else
5413 			{
5414 				// if there is no capture stream, set callbackPulled flag
5415 				callbackPulled = true;
5416 			}
5417 
5418 			// Execute Callback
5419 			// ================
5420 			// 1. Execute user callback method
5421 			// 2. Handle return value from callback
5422 
5423 			// if callback has not requested the stream to stop
5424 			if (callbackPulled && !callbackStopped)
5425 			{
5426 				// Execute user callback method
5427 				callbackResult = callback(stream_.userBuffer[OUTPUT],
5428 										  stream_.userBuffer[INPUT],
5429 										  stream_.bufferSize,
5430 										  getStreamTime(),
5431 										  captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5432 										  stream_.callbackInfo.userData);
5433 
5434 				// Handle return value from callback
5435 				if (callbackResult == 1)
5436 				{
5437 					// instantiate a thread to stop this thread
5438 					HANDLE threadHandle = CreateThread(NULL, 0, stopWasapiThread, this, 0, NULL);
5439 					if (!threadHandle)
5440 					{
5441 						errorType = RtAudioError::THREAD_ERROR;
5442 						errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5443 						goto Exit;
5444 					}
5445 					else if (!CloseHandle(threadHandle))
5446 					{
5447 						errorType = RtAudioError::THREAD_ERROR;
5448 						errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5449 						goto Exit;
5450 					}
5451 
5452 					callbackStopped = true;
5453 				}
5454 				else if (callbackResult == 2)
5455 				{
5456 					// instantiate a thread to stop this thread
5457 					HANDLE threadHandle = CreateThread(NULL, 0, abortWasapiThread, this, 0, NULL);
5458 					if (!threadHandle)
5459 					{
5460 						errorType = RtAudioError::THREAD_ERROR;
5461 						errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5462 						goto Exit;
5463 					}
5464 					else if (!CloseHandle(threadHandle))
5465 					{
5466 						errorType = RtAudioError::THREAD_ERROR;
5467 						errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5468 						goto Exit;
5469 					}
5470 
5471 					callbackStopped = true;
5472 				}
5473 			}
5474 		}
5475 
5476 		// Callback Output
5477 		// ===============
5478 		// 1. Convert callback buffer to stream format
5479 		// 2. Convert callback buffer to stream sample rate and channel count
5480 		// 3. Push callback buffer into outputBuffer
5481 
5482 		if (renderAudioClient && callbackPulled)
5483 		{
5484 			if (stream_.doConvertBuffer[OUTPUT])
5485 			{
5486 				// Convert callback buffer to stream format
5487 				convertBuffer(stream_.deviceBuffer,
5488 							  stream_.userBuffer[OUTPUT],
5489 							  stream_.convertInfo[OUTPUT]);
5490 			}
5491 
5492 			// Convert callback buffer to stream sample rate
5493 			convertBufferWasapi(convBuffer,
5494 								stream_.deviceBuffer,
5495 								stream_.nDeviceChannels[OUTPUT],
5496 								stream_.sampleRate,
5497 								renderFormat->nSamplesPerSec,
5498 								stream_.bufferSize,
5499 								convBufferSize,
5500 								stream_.deviceFormat[OUTPUT]);
5501 
5502 			// Push callback buffer into outputBuffer
5503 			callbackPushed = renderBuffer.pushBuffer(convBuffer,
5504 													 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5505 													 stream_.deviceFormat[OUTPUT]);
5506 		}
5507 		else
5508 		{
5509 			// if there is no render stream, set callbackPushed flag
5510 			callbackPushed = true;
5511 		}
5512 
5513 		// Stream Capture
5514 		// ==============
5515 		// 1. Get capture buffer from stream
5516 		// 2. Push capture buffer into inputBuffer
5517 		// 3. If 2. was successful: Release capture buffer
5518 
5519 		if (captureAudioClient)
5520 		{
5521 			// if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5522 			if (!callbackPulled)
5523 			{
5524 				WaitForSingleObject(captureEvent, INFINITE);
5525 			}
5526 
5527 			// Get capture buffer from stream
5528 			hr = captureClient->GetBuffer(&streamBuffer,
5529 										  &bufferFrameCount,
5530 										  &captureFlags, NULL, NULL);
5531 			if (FAILED(hr))
5532 			{
5533 				errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5534 				goto Exit;
5535 			}
5536 
5537 			if (bufferFrameCount != 0)
5538 			{
5539 				// Push capture buffer into inputBuffer
5540 				if (captureBuffer.pushBuffer((char *)streamBuffer,
5541 											 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5542 											 stream_.deviceFormat[INPUT]))
5543 				{
5544 					// Release capture buffer
5545 					hr = captureClient->ReleaseBuffer(bufferFrameCount);
5546 					if (FAILED(hr))
5547 					{
5548 						errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5549 						goto Exit;
5550 					}
5551 				}
5552 				else
5553 				{
5554 					// Inform WASAPI that capture was unsuccessful
5555 					hr = captureClient->ReleaseBuffer(0);
5556 					if (FAILED(hr))
5557 					{
5558 						errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5559 						goto Exit;
5560 					}
5561 				}
5562 			}
5563 			else
5564 			{
5565 				// Inform WASAPI that capture was unsuccessful
5566 				hr = captureClient->ReleaseBuffer(0);
5567 				if (FAILED(hr))
5568 				{
5569 					errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5570 					goto Exit;
5571 				}
5572 			}
5573 		}
5574 
5575 		// Stream Render
5576 		// =============
5577 		// 1. Get render buffer from stream
5578 		// 2. Pull next buffer from outputBuffer
5579 		// 3. If 2. was successful: Fill render buffer with next buffer
5580 		//                          Release render buffer
5581 
5582 		if (renderAudioClient)
5583 		{
5584 			// if the callback output buffer was not pushed to renderBuffer, wait for next render event
5585 			if (callbackPulled && !callbackPushed)
5586 			{
5587 				WaitForSingleObject(renderEvent, INFINITE);
5588 			}
5589 
5590 			// Get render buffer from stream
5591 			hr = renderAudioClient->GetBufferSize(&bufferFrameCount);
5592 			if (FAILED(hr))
5593 			{
5594 				errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5595 				goto Exit;
5596 			}
5597 
5598 			hr = renderAudioClient->GetCurrentPadding(&numFramesPadding);
5599 			if (FAILED(hr))
5600 			{
5601 				errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5602 				goto Exit;
5603 			}
5604 
5605 			bufferFrameCount -= numFramesPadding;
5606 
5607 			if (bufferFrameCount != 0)
5608 			{
5609 				hr = renderClient->GetBuffer(bufferFrameCount, &streamBuffer);
5610 				if (FAILED(hr))
5611 				{
5612 					errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5613 					goto Exit;
5614 				}
5615 
5616 				// Pull next buffer from outputBuffer
5617 				// Fill render buffer with next buffer
5618 				if (renderBuffer.pullBuffer((char *)streamBuffer,
5619 											bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5620 											stream_.deviceFormat[OUTPUT]))
5621 				{
5622 					// Release render buffer
5623 					hr = renderClient->ReleaseBuffer(bufferFrameCount, 0);
5624 					if (FAILED(hr))
5625 					{
5626 						errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5627 						goto Exit;
5628 					}
5629 				}
5630 				else
5631 				{
5632 					// Inform WASAPI that render was unsuccessful
5633 					hr = renderClient->ReleaseBuffer(0, 0);
5634 					if (FAILED(hr))
5635 					{
5636 						errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5637 						goto Exit;
5638 					}
5639 				}
5640 			}
5641 			else
5642 			{
5643 				// Inform WASAPI that render was unsuccessful
5644 				hr = renderClient->ReleaseBuffer(0, 0);
5645 				if (FAILED(hr))
5646 				{
5647 					errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5648 					goto Exit;
5649 				}
5650 			}
5651 		}
5652 
5653 		// if the callback buffer was pushed renderBuffer reset callbackPulled flag
5654 		if (callbackPushed)
5655 		{
5656 			callbackPulled = false;
5657 			// tick stream time
5658 			RtApi::tickStreamTime();
5659 		}
5660 	}
5661 
5662 Exit:
5663 	// clean up
5664 	CoTaskMemFree(captureFormat);
5665 	CoTaskMemFree(renderFormat);
5666 
5667 	free(convBuffer);
5668 
5669 	CoUninitialize();
5670 
5671 	// update stream state
5672 	stream_.state = STREAM_STOPPED;
5673 
5674 	if (errorText_.empty())
5675 		return;
5676 	else
5677 		error(errorType);
5678 }
5679 
5680 //******************** End of __WINDOWS_WASAPI__ *********************//
5681 #endif
5682 
5683 #if defined(__WINDOWS_DS__)  // Windows DirectSound API
5684 
5685 // Modified by Robin Davies, October 2005
5686 // - Improvements to DirectX pointer chasing.
5687 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5688 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5689 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5690 // Changed device query structure for RtAudio 4.0.7, January 2010
5691 
5692 #include <dsound.h>
5693 #include <assert.h>
5694 #include <algorithm>
5695 
5696 #if defined(__MINGW32__)
5697 // missing from latest mingw winapi
5698 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5699 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5700 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5701 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5702 #endif
5703 
5704 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5705 
5706 #ifdef _MSC_VER                    // if Microsoft Visual C++
5707 #pragma comment(lib, "winmm.lib")  // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5708 #endif
5709 
dsPointerBetween(DWORD pointer,DWORD laterPointer,DWORD earlierPointer,DWORD bufferSize)5710 static inline DWORD dsPointerBetween(DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize)
5711 {
5712 	if (pointer > bufferSize) pointer -= bufferSize;
5713 	if (laterPointer < earlierPointer) laterPointer += bufferSize;
5714 	if (pointer < earlierPointer) pointer += bufferSize;
5715 	return pointer >= earlierPointer && pointer < laterPointer;
5716 }
5717 
5718 // A structure to hold various information related to the DirectSound
5719 // API implementation.
5720 struct DsHandle
5721 {
5722 	unsigned int drainCounter;  // Tracks callback counts when draining
5723 	bool internalDrain;         // Indicates if stop is initiated from callback or not.
5724 	void *id[2];
5725 	void *buffer[2];
5726 	bool xrun[2];
5727 	UINT bufferPointer[2];
5728 	DWORD dsBufferSize[2];
5729 	DWORD dsPointerLeadTime[2];  // the number of bytes ahead of the safe pointer to lead by.
5730 	HANDLE condition;
5731 
DsHandleDsHandle5732 	DsHandle()
5733 		: drainCounter(0), internalDrain(false)
5734 	{
5735 		id[0] = 0;
5736 		id[1] = 0;
5737 		buffer[0] = 0;
5738 		buffer[1] = 0;
5739 		xrun[0] = false;
5740 		xrun[1] = false;
5741 		bufferPointer[0] = 0;
5742 		bufferPointer[1] = 0;
5743 	}
5744 };
5745 
5746 // Declarations for utility functions, callbacks, and structures
5747 // specific to the DirectSound implementation.
5748 static BOOL CALLBACK deviceQueryCallback(LPGUID lpguid,
5749 										 LPCTSTR description,
5750 										 LPCTSTR module,
5751 										 LPVOID lpContext);
5752 
5753 static const char *getErrorString(int code);
5754 
5755 static unsigned __stdcall callbackHandler(void *ptr);
5756 
5757 struct DsDevice
5758 {
5759 	LPGUID id[2];
5760 	bool validId[2];
5761 	bool found;
5762 	std::string name;
5763 
DsDeviceDsDevice5764 	DsDevice()
5765 		: found(false)
5766 	{
5767 		validId[0] = false;
5768 		validId[1] = false;
5769 	}
5770 };
5771 
// Context passed to the DirectSound enumeration callback: selects whether
// capture (input) or playback devices are being probed, and points at the
// device list to populate.
struct DsProbeData
{
	bool isInput;                          // true when enumerating capture devices
	std::vector<struct DsDevice> *dsDevices;  // device list filled in by the callback
};
5777 
RtApiDs()5778 RtApiDs ::RtApiDs()
5779 {
5780 	// Dsound will run both-threaded. If CoInitialize fails, then just
5781 	// accept whatever the mainline chose for a threading model.
5782 	coInitialized_ = false;
5783 	HRESULT hr = CoInitialize(NULL);
5784 	if (!FAILED(hr)) coInitialized_ = true;
5785 }
5786 
~RtApiDs()5787 RtApiDs ::~RtApiDs()
5788 {
5789 	if (coInitialized_) CoUninitialize();  // balanced call.
5790 	if (stream_.state != STREAM_CLOSED) closeStream();
5791 }
5792 
5793 // The DirectSound default output is always the first device.
getDefaultOutputDevice(void)5794 unsigned int RtApiDs ::getDefaultOutputDevice(void)
5795 {
5796 	return 0;
5797 }
5798 
5799 // The DirectSound default input is always the first input device,
5800 // which is the first capture device enumerated.
getDefaultInputDevice(void)5801 unsigned int RtApiDs ::getDefaultInputDevice(void)
5802 {
5803 	return 0;
5804 }
5805 
getDeviceCount(void)5806 unsigned int RtApiDs ::getDeviceCount(void)
5807 {
5808 	// Set query flag for previously found devices to false, so that we
5809 	// can check for any devices that have disappeared.
5810 	for (unsigned int i = 0; i < dsDevices.size(); i++)
5811 		dsDevices[i].found = false;
5812 
5813 	// Query DirectSound devices.
5814 	struct DsProbeData probeInfo;
5815 	probeInfo.isInput = false;
5816 	probeInfo.dsDevices = &dsDevices;
5817 	HRESULT result = DirectSoundEnumerate((LPDSENUMCALLBACK)deviceQueryCallback, &probeInfo);
5818 	if (FAILED(result))
5819 	{
5820 		errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString(result) << ") enumerating output devices!";
5821 		errorText_ = errorStream_.str();
5822 		error(RtAudioError::WARNING);
5823 	}
5824 
5825 	// Query DirectSoundCapture devices.
5826 	probeInfo.isInput = true;
5827 	result = DirectSoundCaptureEnumerate((LPDSENUMCALLBACK)deviceQueryCallback, &probeInfo);
5828 	if (FAILED(result))
5829 	{
5830 		errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString(result) << ") enumerating input devices!";
5831 		errorText_ = errorStream_.str();
5832 		error(RtAudioError::WARNING);
5833 	}
5834 
5835 	// Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5836 	for (unsigned int i = 0; i < dsDevices.size();)
5837 	{
5838 		if (dsDevices[i].found == false)
5839 			dsDevices.erase(dsDevices.begin() + i);
5840 		else
5841 			i++;
5842 	}
5843 
5844 	return static_cast<unsigned int>(dsDevices.size());
5845 }
5846 
// Probe the DirectSound device at the given index and return its
// capabilities (channel counts, supported sample rates, native formats).
// Probe failures raise a WARNING and return whatever was gathered so far
// with info.probed left false; an invalid index raises INVALID_USE.
RtAudio::DeviceInfo RtApiDs ::getDeviceInfo(unsigned int device)
{
	RtAudio::DeviceInfo info;
	info.probed = false;

	if (dsDevices.size() == 0)
	{
		// Force a query of all devices
		getDeviceCount();
		if (dsDevices.size() == 0)
		{
			errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
			error(RtAudioError::INVALID_USE);
			return info;
		}
	}

	if (device >= dsDevices.size())
	{
		errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
		error(RtAudioError::INVALID_USE);
		return info;
	}

	HRESULT result;
	// No playback GUID for this device: skip straight to the input probe.
	if (dsDevices[device].validId[0] == false) goto probeInput;

	LPDIRECTSOUND output;
	DSCAPS outCaps;
	result = DirectSoundCreate(dsDevices[device].id[0], &output, NULL);
	if (FAILED(result))
	{
		errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString(result) << ") opening output device (" << dsDevices[device].name << ")!";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		goto probeInput;
	}

	outCaps.dwSize = sizeof(outCaps);
	result = output->GetCaps(&outCaps);
	if (FAILED(result))
	{
		output->Release();
		errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString(result) << ") getting capabilities!";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		goto probeInput;
	}

	// Get output channel information.
	// Stereo is reported only when the primary buffer supports it.
	info.outputChannels = (outCaps.dwFlags & DSCAPS_PRIMARYSTEREO) ? 2 : 1;

	// Get sample rate information.
	// Keep every candidate rate inside the device's secondary-buffer range,
	// and track the highest such rate up to 48 kHz as the preferred rate.
	info.sampleRates.clear();
	for (unsigned int k = 0; k < MAX_SAMPLE_RATES; k++)
	{
		if (SAMPLE_RATES[k] >= (unsigned int)outCaps.dwMinSecondarySampleRate &&
			SAMPLE_RATES[k] <= (unsigned int)outCaps.dwMaxSecondarySampleRate)
		{
			info.sampleRates.push_back(SAMPLE_RATES[k]);

			if (!info.preferredSampleRate || (SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate))
				info.preferredSampleRate = SAMPLE_RATES[k];
		}
	}

	// Get format information.
	if (outCaps.dwFlags & DSCAPS_PRIMARY16BIT) info.nativeFormats |= RTAUDIO_SINT16;
	if (outCaps.dwFlags & DSCAPS_PRIMARY8BIT) info.nativeFormats |= RTAUDIO_SINT8;

	output->Release();

	if (getDefaultOutputDevice() == device)
		info.isDefaultOutput = true;

	// Output-only device: finish here without probing capture.
	if (dsDevices[device].validId[1] == false)
	{
		info.name = dsDevices[device].name;
		info.probed = true;
		return info;
	}

probeInput:

	LPDIRECTSOUNDCAPTURE input;
	result = DirectSoundCaptureCreate(dsDevices[device].id[1], &input, NULL);
	if (FAILED(result))
	{
		// NOTE(review): if this capture probe fails after a successful
		// output probe above, the output capabilities are returned with
		// info.probed still false and info.name unset — confirm intended.
		errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString(result) << ") opening input device (" << dsDevices[device].name << ")!";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	DSCCAPS inCaps;
	inCaps.dwSize = sizeof(inCaps);
	result = input->GetCaps(&inCaps);
	if (FAILED(result))
	{
		input->Release();
		errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString(result) << ") getting object capabilities (" << dsDevices[device].name << ")!";
		errorText_ = errorStream_.str();
		error(RtAudioError::WARNING);
		return info;
	}

	// Get input channel information.
	info.inputChannels = inCaps.dwChannels;

	// Get sample rate and format information.
	// The WAVE_FORMAT_* capability bits encode (rate, channels, width)
	// combinations; first derive the native formats, then the rates that go
	// with whichever format was selected.
	std::vector<unsigned int> rates;
	if (inCaps.dwChannels >= 2)
	{
		// Stereo capability bits.
		if (inCaps.dwFormats & WAVE_FORMAT_1S16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_2S16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_4S16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_96S16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_1S08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_2S08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_4S08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_96S08) info.nativeFormats |= RTAUDIO_SINT8;

		// 16-bit support takes precedence over 8-bit when both exist.
		if (info.nativeFormats & RTAUDIO_SINT16)
		{
			if (inCaps.dwFormats & WAVE_FORMAT_1S16) rates.push_back(11025);
			if (inCaps.dwFormats & WAVE_FORMAT_2S16) rates.push_back(22050);
			if (inCaps.dwFormats & WAVE_FORMAT_4S16) rates.push_back(44100);
			if (inCaps.dwFormats & WAVE_FORMAT_96S16) rates.push_back(96000);
		}
		else if (info.nativeFormats & RTAUDIO_SINT8)
		{
			if (inCaps.dwFormats & WAVE_FORMAT_1S08) rates.push_back(11025);
			if (inCaps.dwFormats & WAVE_FORMAT_2S08) rates.push_back(22050);
			if (inCaps.dwFormats & WAVE_FORMAT_4S08) rates.push_back(44100);
			if (inCaps.dwFormats & WAVE_FORMAT_96S08) rates.push_back(96000);
		}
	}
	else if (inCaps.dwChannels == 1)
	{
		// Mono capability bits.
		if (inCaps.dwFormats & WAVE_FORMAT_1M16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_2M16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_4M16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_96M16) info.nativeFormats |= RTAUDIO_SINT16;
		if (inCaps.dwFormats & WAVE_FORMAT_1M08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_2M08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_4M08) info.nativeFormats |= RTAUDIO_SINT8;
		if (inCaps.dwFormats & WAVE_FORMAT_96M08) info.nativeFormats |= RTAUDIO_SINT8;

		if (info.nativeFormats & RTAUDIO_SINT16)
		{
			if (inCaps.dwFormats & WAVE_FORMAT_1M16) rates.push_back(11025);
			if (inCaps.dwFormats & WAVE_FORMAT_2M16) rates.push_back(22050);
			if (inCaps.dwFormats & WAVE_FORMAT_4M16) rates.push_back(44100);
			if (inCaps.dwFormats & WAVE_FORMAT_96M16) rates.push_back(96000);
		}
		else if (info.nativeFormats & RTAUDIO_SINT8)
		{
			if (inCaps.dwFormats & WAVE_FORMAT_1M08) rates.push_back(11025);
			if (inCaps.dwFormats & WAVE_FORMAT_2M08) rates.push_back(22050);
			if (inCaps.dwFormats & WAVE_FORMAT_4M08) rates.push_back(44100);
			if (inCaps.dwFormats & WAVE_FORMAT_96M08) rates.push_back(96000);
		}
	}
	else
		info.inputChannels = 0;  // technically, this would be an error

	input->Release();

	if (info.inputChannels == 0) return info;

	// Copy the supported rates to the info structure but avoid duplication.
	bool found;
	for (unsigned int i = 0; i < rates.size(); i++)
	{
		found = false;
		for (unsigned int j = 0; j < info.sampleRates.size(); j++)
		{
			if (rates[i] == info.sampleRates[j])
			{
				found = true;
				break;
			}
		}
		if (found == false) info.sampleRates.push_back(rates[i]);
	}
	std::sort(info.sampleRates.begin(), info.sampleRates.end());

	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0)
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

	// Device 0 is the default input (see getDefaultInputDevice).
	if (device == 0) info.isDefaultInput = true;

	// Copy name and return.
	info.name = dsDevices[device].name;
	info.probed = true;
	return info;
}
6045 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)6046 bool RtApiDs ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
6047 							   unsigned int firstChannel, unsigned int sampleRate,
6048 							   RtAudioFormat format, unsigned int *bufferSize,
6049 							   RtAudio::StreamOptions *options)
6050 {
6051 	if (channels + firstChannel > 2)
6052 	{
6053 		errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
6054 		return FAILURE;
6055 	}
6056 
6057 	size_t nDevices = dsDevices.size();
6058 	if (nDevices == 0)
6059 	{
6060 		// This should not happen because a check is made before this function is called.
6061 		errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
6062 		return FAILURE;
6063 	}
6064 
6065 	if (device >= nDevices)
6066 	{
6067 		// This should not happen because a check is made before this function is called.
6068 		errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
6069 		return FAILURE;
6070 	}
6071 
6072 	if (mode == OUTPUT)
6073 	{
6074 		if (dsDevices[device].validId[0] == false)
6075 		{
6076 			errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
6077 			errorText_ = errorStream_.str();
6078 			return FAILURE;
6079 		}
6080 	}
6081 	else
6082 	{  // mode == INPUT
6083 		if (dsDevices[device].validId[1] == false)
6084 		{
6085 			errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
6086 			errorText_ = errorStream_.str();
6087 			return FAILURE;
6088 		}
6089 	}
6090 
6091 	// According to a note in PortAudio, using GetDesktopWindow()
6092 	// instead of GetForegroundWindow() is supposed to avoid problems
6093 	// that occur when the application's window is not the foreground
6094 	// window.  Also, if the application window closes before the
6095 	// DirectSound buffer, DirectSound can crash.  In the past, I had
6096 	// problems when using GetDesktopWindow() but it seems fine now
6097 	// (January 2010).  I'll leave it commented here.
6098 	// HWND hWnd = GetForegroundWindow();
6099 	HWND hWnd = GetDesktopWindow();
6100 
6101 	// Check the numberOfBuffers parameter and limit the lowest value to
6102 	// two.  This is a judgement call and a value of two is probably too
6103 	// low for capture, but it should work for playback.
6104 	int nBuffers = 0;
6105 	if (options) nBuffers = options->numberOfBuffers;
6106 	if (options && options->flags & RTAUDIO_MINIMIZE_LATENCY) nBuffers = 2;
6107 	if (nBuffers < 2) nBuffers = 3;
6108 
6109 	// Check the lower range of the user-specified buffer size and set
6110 	// (arbitrarily) to a lower bound of 32.
6111 	if (*bufferSize < 32) *bufferSize = 32;
6112 
6113 	// Create the wave format structure.  The data format setting will
6114 	// be determined later.
6115 	WAVEFORMATEX waveFormat;
6116 	ZeroMemory(&waveFormat, sizeof(WAVEFORMATEX));
6117 	waveFormat.wFormatTag = WAVE_FORMAT_PCM;
6118 	waveFormat.nChannels = channels + firstChannel;
6119 	waveFormat.nSamplesPerSec = (unsigned long)sampleRate;
6120 
6121 	// Determine the device buffer size. By default, we'll use the value
6122 	// defined above (32K), but we will grow it to make allowances for
6123 	// very large software buffer sizes.
6124 	DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
6125 	DWORD dsPointerLeadTime = 0;
6126 
6127 	void *ohandle = 0, *bhandle = 0;
6128 	HRESULT result;
6129 	if (mode == OUTPUT)
6130 	{
6131 		LPDIRECTSOUND output;
6132 		result = DirectSoundCreate(dsDevices[device].id[0], &output, NULL);
6133 		if (FAILED(result))
6134 		{
6135 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") opening output device (" << dsDevices[device].name << ")!";
6136 			errorText_ = errorStream_.str();
6137 			return FAILURE;
6138 		}
6139 
6140 		DSCAPS outCaps;
6141 		outCaps.dwSize = sizeof(outCaps);
6142 		result = output->GetCaps(&outCaps);
6143 		if (FAILED(result))
6144 		{
6145 			output->Release();
6146 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") getting capabilities (" << dsDevices[device].name << ")!";
6147 			errorText_ = errorStream_.str();
6148 			return FAILURE;
6149 		}
6150 
6151 		// Check channel information.
6152 		if (channels + firstChannel == 2 && !(outCaps.dwFlags & DSCAPS_PRIMARYSTEREO))
6153 		{
6154 			errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[device].name << ") does not support stereo playback.";
6155 			errorText_ = errorStream_.str();
6156 			return FAILURE;
6157 		}
6158 
6159 		// Check format information.  Use 16-bit format unless not
6160 		// supported or user requests 8-bit.
6161 		if (outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
6162 			!(format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT))
6163 		{
6164 			waveFormat.wBitsPerSample = 16;
6165 			stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6166 		}
6167 		else
6168 		{
6169 			waveFormat.wBitsPerSample = 8;
6170 			stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6171 		}
6172 		stream_.userFormat = format;
6173 
6174 		// Update wave format structure and buffer information.
6175 		waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6176 		waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6177 		dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6178 
6179 		// If the user wants an even bigger buffer, increase the device buffer size accordingly.
6180 		while (dsPointerLeadTime * 2U > dsBufferSize)
6181 			dsBufferSize *= 2;
6182 
6183 		// Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
6184 		// result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
6185 		// Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
6186 		result = output->SetCooperativeLevel(hWnd, DSSCL_PRIORITY);
6187 		if (FAILED(result))
6188 		{
6189 			output->Release();
6190 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") setting cooperative level (" << dsDevices[device].name << ")!";
6191 			errorText_ = errorStream_.str();
6192 			return FAILURE;
6193 		}
6194 
6195 		// Even though we will write to the secondary buffer, we need to
6196 		// access the primary buffer to set the correct output format
6197 		// (since the default is 8-bit, 22 kHz!).  Setup the DS primary
6198 		// buffer description.
6199 		DSBUFFERDESC bufferDescription;
6200 		ZeroMemory(&bufferDescription, sizeof(DSBUFFERDESC));
6201 		bufferDescription.dwSize = sizeof(DSBUFFERDESC);
6202 		bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6203 
6204 		// Obtain the primary buffer
6205 		LPDIRECTSOUNDBUFFER buffer;
6206 		result = output->CreateSoundBuffer(&bufferDescription, &buffer, NULL);
6207 		if (FAILED(result))
6208 		{
6209 			output->Release();
6210 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") accessing primary buffer (" << dsDevices[device].name << ")!";
6211 			errorText_ = errorStream_.str();
6212 			return FAILURE;
6213 		}
6214 
6215 		// Set the primary DS buffer sound format.
6216 		result = buffer->SetFormat(&waveFormat);
6217 		if (FAILED(result))
6218 		{
6219 			output->Release();
6220 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") setting primary buffer format (" << dsDevices[device].name << ")!";
6221 			errorText_ = errorStream_.str();
6222 			return FAILURE;
6223 		}
6224 
6225 		// Setup the secondary DS buffer description.
6226 		ZeroMemory(&bufferDescription, sizeof(DSBUFFERDESC));
6227 		bufferDescription.dwSize = sizeof(DSBUFFERDESC);
6228 		bufferDescription.dwFlags = (DSBCAPS_STICKYFOCUS |
6229 									 DSBCAPS_GLOBALFOCUS |
6230 									 DSBCAPS_GETCURRENTPOSITION2 |
6231 									 DSBCAPS_LOCHARDWARE);  // Force hardware mixing
6232 		bufferDescription.dwBufferBytes = dsBufferSize;
6233 		bufferDescription.lpwfxFormat = &waveFormat;
6234 
6235 		// Try to create the secondary DS buffer.  If that doesn't work,
6236 		// try to use software mixing.  Otherwise, there's a problem.
6237 		result = output->CreateSoundBuffer(&bufferDescription, &buffer, NULL);
6238 		if (FAILED(result))
6239 		{
6240 			bufferDescription.dwFlags = (DSBCAPS_STICKYFOCUS |
6241 										 DSBCAPS_GLOBALFOCUS |
6242 										 DSBCAPS_GETCURRENTPOSITION2 |
6243 										 DSBCAPS_LOCSOFTWARE);  // Force software mixing
6244 			result = output->CreateSoundBuffer(&bufferDescription, &buffer, NULL);
6245 			if (FAILED(result))
6246 			{
6247 				output->Release();
6248 				errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") creating secondary buffer (" << dsDevices[device].name << ")!";
6249 				errorText_ = errorStream_.str();
6250 				return FAILURE;
6251 			}
6252 		}
6253 
6254 		// Get the buffer size ... might be different from what we specified.
6255 		DSBCAPS dsbcaps;
6256 		dsbcaps.dwSize = sizeof(DSBCAPS);
6257 		result = buffer->GetCaps(&dsbcaps);
6258 		if (FAILED(result))
6259 		{
6260 			output->Release();
6261 			buffer->Release();
6262 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") getting buffer settings (" << dsDevices[device].name << ")!";
6263 			errorText_ = errorStream_.str();
6264 			return FAILURE;
6265 		}
6266 
6267 		dsBufferSize = dsbcaps.dwBufferBytes;
6268 
6269 		// Lock the DS buffer
6270 		LPVOID audioPtr;
6271 		DWORD dataLen;
6272 		result = buffer->Lock(0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0);
6273 		if (FAILED(result))
6274 		{
6275 			output->Release();
6276 			buffer->Release();
6277 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") locking buffer (" << dsDevices[device].name << ")!";
6278 			errorText_ = errorStream_.str();
6279 			return FAILURE;
6280 		}
6281 
6282 		// Zero the DS buffer
6283 		ZeroMemory(audioPtr, dataLen);
6284 
6285 		// Unlock the DS buffer
6286 		result = buffer->Unlock(audioPtr, dataLen, NULL, 0);
6287 		if (FAILED(result))
6288 		{
6289 			output->Release();
6290 			buffer->Release();
6291 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") unlocking buffer (" << dsDevices[device].name << ")!";
6292 			errorText_ = errorStream_.str();
6293 			return FAILURE;
6294 		}
6295 
6296 		ohandle = (void *)output;
6297 		bhandle = (void *)buffer;
6298 	}
6299 
6300 	if (mode == INPUT)
6301 	{
6302 		LPDIRECTSOUNDCAPTURE input;
6303 		result = DirectSoundCaptureCreate(dsDevices[device].id[1], &input, NULL);
6304 		if (FAILED(result))
6305 		{
6306 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") opening input device (" << dsDevices[device].name << ")!";
6307 			errorText_ = errorStream_.str();
6308 			return FAILURE;
6309 		}
6310 
6311 		DSCCAPS inCaps;
6312 		inCaps.dwSize = sizeof(inCaps);
6313 		result = input->GetCaps(&inCaps);
6314 		if (FAILED(result))
6315 		{
6316 			input->Release();
6317 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") getting input capabilities (" << dsDevices[device].name << ")!";
6318 			errorText_ = errorStream_.str();
6319 			return FAILURE;
6320 		}
6321 
6322 		// Check channel information.
6323 		if (inCaps.dwChannels < channels + firstChannel)
6324 		{
6325 			errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6326 			return FAILURE;
6327 		}
6328 
6329 		// Check format information.  Use 16-bit format unless user
6330 		// requests 8-bit.
6331 		DWORD deviceFormats;
6332 		if (channels + firstChannel == 2)
6333 		{
6334 			deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6335 			if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats)
6336 			{
6337 				waveFormat.wBitsPerSample = 8;
6338 				stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6339 			}
6340 			else
6341 			{  // assume 16-bit is supported
6342 				waveFormat.wBitsPerSample = 16;
6343 				stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6344 			}
6345 		}
6346 		else
6347 		{  // channel == 1
6348 			deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6349 			if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats)
6350 			{
6351 				waveFormat.wBitsPerSample = 8;
6352 				stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6353 			}
6354 			else
6355 			{  // assume 16-bit is supported
6356 				waveFormat.wBitsPerSample = 16;
6357 				stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6358 			}
6359 		}
6360 		stream_.userFormat = format;
6361 
6362 		// Update wave format structure and buffer information.
6363 		waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6364 		waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6365 		dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6366 
6367 		// If the user wants an even bigger buffer, increase the device buffer size accordingly.
6368 		while (dsPointerLeadTime * 2U > dsBufferSize)
6369 			dsBufferSize *= 2;
6370 
6371 		// Setup the secondary DS buffer description.
6372 		DSCBUFFERDESC bufferDescription;
6373 		ZeroMemory(&bufferDescription, sizeof(DSCBUFFERDESC));
6374 		bufferDescription.dwSize = sizeof(DSCBUFFERDESC);
6375 		bufferDescription.dwFlags = 0;
6376 		bufferDescription.dwReserved = 0;
6377 		bufferDescription.dwBufferBytes = dsBufferSize;
6378 		bufferDescription.lpwfxFormat = &waveFormat;
6379 
6380 		// Create the capture buffer.
6381 		LPDIRECTSOUNDCAPTUREBUFFER buffer;
6382 		result = input->CreateCaptureBuffer(&bufferDescription, &buffer, NULL);
6383 		if (FAILED(result))
6384 		{
6385 			input->Release();
6386 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") creating input buffer (" << dsDevices[device].name << ")!";
6387 			errorText_ = errorStream_.str();
6388 			return FAILURE;
6389 		}
6390 
6391 		// Get the buffer size ... might be different from what we specified.
6392 		DSCBCAPS dscbcaps;
6393 		dscbcaps.dwSize = sizeof(DSCBCAPS);
6394 		result = buffer->GetCaps(&dscbcaps);
6395 		if (FAILED(result))
6396 		{
6397 			input->Release();
6398 			buffer->Release();
6399 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") getting buffer settings (" << dsDevices[device].name << ")!";
6400 			errorText_ = errorStream_.str();
6401 			return FAILURE;
6402 		}
6403 
6404 		dsBufferSize = dscbcaps.dwBufferBytes;
6405 
6406 		// NOTE: We could have a problem here if this is a duplex stream
6407 		// and the play and capture hardware buffer sizes are different
6408 		// (I'm actually not sure if that is a problem or not).
6409 		// Currently, we are not verifying that.
6410 
6411 		// Lock the capture buffer
6412 		LPVOID audioPtr;
6413 		DWORD dataLen;
6414 		result = buffer->Lock(0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0);
6415 		if (FAILED(result))
6416 		{
6417 			input->Release();
6418 			buffer->Release();
6419 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") locking input buffer (" << dsDevices[device].name << ")!";
6420 			errorText_ = errorStream_.str();
6421 			return FAILURE;
6422 		}
6423 
6424 		// Zero the buffer
6425 		ZeroMemory(audioPtr, dataLen);
6426 
6427 		// Unlock the buffer
6428 		result = buffer->Unlock(audioPtr, dataLen, NULL, 0);
6429 		if (FAILED(result))
6430 		{
6431 			input->Release();
6432 			buffer->Release();
6433 			errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString(result) << ") unlocking input buffer (" << dsDevices[device].name << ")!";
6434 			errorText_ = errorStream_.str();
6435 			return FAILURE;
6436 		}
6437 
6438 		ohandle = (void *)input;
6439 		bhandle = (void *)buffer;
6440 	}
6441 
6442 	// Set various stream parameters
6443 	DsHandle *handle = 0;
6444 	stream_.nDeviceChannels[mode] = channels + firstChannel;
6445 	stream_.nUserChannels[mode] = channels;
6446 	stream_.bufferSize = *bufferSize;
6447 	stream_.channelOffset[mode] = firstChannel;
6448 	stream_.deviceInterleaved[mode] = true;
6449 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
6450 		stream_.userInterleaved = false;
6451 	else
6452 		stream_.userInterleaved = true;
6453 
6454 	// Set flag for buffer conversion
6455 	stream_.doConvertBuffer[mode] = false;
6456 	if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6457 		stream_.doConvertBuffer[mode] = true;
6458 	if (stream_.userFormat != stream_.deviceFormat[mode])
6459 		stream_.doConvertBuffer[mode] = true;
6460 	if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6461 		stream_.nUserChannels[mode] > 1)
6462 		stream_.doConvertBuffer[mode] = true;
6463 
6464 	// Allocate necessary internal buffers
6465 	long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
6466 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
6467 	if (stream_.userBuffer[mode] == NULL)
6468 	{
6469 		errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6470 		goto error;
6471 	}
6472 
6473 	if (stream_.doConvertBuffer[mode])
6474 	{
6475 		bool makeBuffer = true;
6476 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
6477 		if (mode == INPUT)
6478 		{
6479 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
6480 			{
6481 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
6482 				if (bufferBytes <= (long)bytesOut) makeBuffer = false;
6483 			}
6484 		}
6485 
6486 		if (makeBuffer)
6487 		{
6488 			bufferBytes *= *bufferSize;
6489 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
6490 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
6491 			if (stream_.deviceBuffer == NULL)
6492 			{
6493 				errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6494 				goto error;
6495 			}
6496 		}
6497 	}
6498 
6499 	// Allocate our DsHandle structures for the stream.
6500 	if (stream_.apiHandle == 0)
6501 	{
6502 		try
6503 		{
6504 			handle = new DsHandle;
6505 		}
6506 		catch (std::bad_alloc &)
6507 		{
6508 			errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6509 			goto error;
6510 		}
6511 
6512 		// Create a manual-reset event.
6513 		handle->condition = CreateEvent(NULL,   // no security
6514 										TRUE,   // manual-reset
6515 										FALSE,  // non-signaled initially
6516 										NULL);  // unnamed
6517 		stream_.apiHandle = (void *)handle;
6518 	}
6519 	else
6520 		handle = (DsHandle *)stream_.apiHandle;
6521 	handle->id[mode] = ohandle;
6522 	handle->buffer[mode] = bhandle;
6523 	handle->dsBufferSize[mode] = dsBufferSize;
6524 	handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6525 
6526 	stream_.device[mode] = device;
6527 	stream_.state = STREAM_STOPPED;
6528 	if (stream_.mode == OUTPUT && mode == INPUT)
6529 		// We had already set up an output stream.
6530 		stream_.mode = DUPLEX;
6531 	else
6532 		stream_.mode = mode;
6533 	stream_.nBuffers = nBuffers;
6534 	stream_.sampleRate = sampleRate;
6535 
6536 	// Setup the buffer conversion information structure.
6537 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, firstChannel);
6538 
6539 	// Setup the callback thread.
6540 	if (stream_.callbackInfo.isRunning == false)
6541 	{
6542 		unsigned threadId;
6543 		stream_.callbackInfo.isRunning = true;
6544 		stream_.callbackInfo.object = (void *)this;
6545 		stream_.callbackInfo.thread = _beginthreadex(NULL, 0, &callbackHandler,
6546 													 &stream_.callbackInfo, 0, &threadId);
6547 		if (stream_.callbackInfo.thread == 0)
6548 		{
6549 			errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6550 			goto error;
6551 		}
6552 
6553 		// Boost DS thread priority
6554 		SetThreadPriority((HANDLE)stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST);
6555 	}
6556 	return SUCCESS;
6557 
6558 error:
6559 	if (handle)
6560 	{
6561 		if (handle->buffer[0])
6562 		{  // the object pointer can be NULL and valid
6563 			LPDIRECTSOUND object = (LPDIRECTSOUND)handle->id[0];
6564 			LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
6565 			if (buffer) buffer->Release();
6566 			object->Release();
6567 		}
6568 		if (handle->buffer[1])
6569 		{
6570 			LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE)handle->id[1];
6571 			LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
6572 			if (buffer) buffer->Release();
6573 			object->Release();
6574 		}
6575 		CloseHandle(handle->condition);
6576 		delete handle;
6577 		stream_.apiHandle = 0;
6578 	}
6579 
6580 	for (int i = 0; i < 2; i++)
6581 	{
6582 		if (stream_.userBuffer[i])
6583 		{
6584 			free(stream_.userBuffer[i]);
6585 			stream_.userBuffer[i] = 0;
6586 		}
6587 	}
6588 
6589 	if (stream_.deviceBuffer)
6590 	{
6591 		free(stream_.deviceBuffer);
6592 		stream_.deviceBuffer = 0;
6593 	}
6594 
6595 	stream_.state = STREAM_CLOSED;
6596 	return FAILURE;
6597 }
6598 
closeStream()6599 void RtApiDs ::closeStream()
6600 {
6601 	if (stream_.state == STREAM_CLOSED)
6602 	{
6603 		errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6604 		error(RtAudioError::WARNING);
6605 		return;
6606 	}
6607 
6608 	// Stop the callback thread.
6609 	stream_.callbackInfo.isRunning = false;
6610 	WaitForSingleObject((HANDLE)stream_.callbackInfo.thread, INFINITE);
6611 	CloseHandle((HANDLE)stream_.callbackInfo.thread);
6612 
6613 	DsHandle *handle = (DsHandle *)stream_.apiHandle;
6614 	if (handle)
6615 	{
6616 		if (handle->buffer[0])
6617 		{  // the object pointer can be NULL and valid
6618 			LPDIRECTSOUND object = (LPDIRECTSOUND)handle->id[0];
6619 			LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
6620 			if (buffer)
6621 			{
6622 				buffer->Stop();
6623 				buffer->Release();
6624 			}
6625 			object->Release();
6626 		}
6627 		if (handle->buffer[1])
6628 		{
6629 			LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE)handle->id[1];
6630 			LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
6631 			if (buffer)
6632 			{
6633 				buffer->Stop();
6634 				buffer->Release();
6635 			}
6636 			object->Release();
6637 		}
6638 		CloseHandle(handle->condition);
6639 		delete handle;
6640 		stream_.apiHandle = 0;
6641 	}
6642 
6643 	for (int i = 0; i < 2; i++)
6644 	{
6645 		if (stream_.userBuffer[i])
6646 		{
6647 			free(stream_.userBuffer[i]);
6648 			stream_.userBuffer[i] = 0;
6649 		}
6650 	}
6651 
6652 	if (stream_.deviceBuffer)
6653 	{
6654 		free(stream_.deviceBuffer);
6655 		stream_.deviceBuffer = 0;
6656 	}
6657 
6658 	stream_.mode = UNINITIALIZED;
6659 	stream_.state = STREAM_CLOSED;
6660 }
6661 
startStream()6662 void RtApiDs ::startStream()
6663 {
6664 	verifyStream();
6665 	if (stream_.state == STREAM_RUNNING)
6666 	{
6667 		errorText_ = "RtApiDs::startStream(): the stream is already running!";
6668 		error(RtAudioError::WARNING);
6669 		return;
6670 	}
6671 
6672 	DsHandle *handle = (DsHandle *)stream_.apiHandle;
6673 
6674 	// Increase scheduler frequency on lesser windows (a side-effect of
6675 	// increasing timer accuracy).  On greater windows (Win2K or later),
6676 	// this is already in effect.
6677 	timeBeginPeriod(1);
6678 
6679 	buffersRolling = false;
6680 	duplexPrerollBytes = 0;
6681 
6682 	if (stream_.mode == DUPLEX)
6683 	{
6684 		// 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6685 		duplexPrerollBytes = (int)(0.5 * stream_.sampleRate * formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1]);
6686 	}
6687 
6688 	HRESULT result = 0;
6689 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
6690 	{
6691 		LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
6692 		result = buffer->Play(0, 0, DSBPLAY_LOOPING);
6693 		if (FAILED(result))
6694 		{
6695 			errorStream_ << "RtApiDs::startStream: error (" << getErrorString(result) << ") starting output buffer!";
6696 			errorText_ = errorStream_.str();
6697 			goto unlock;
6698 		}
6699 	}
6700 
6701 	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
6702 	{
6703 		LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
6704 		result = buffer->Start(DSCBSTART_LOOPING);
6705 		if (FAILED(result))
6706 		{
6707 			errorStream_ << "RtApiDs::startStream: error (" << getErrorString(result) << ") starting input buffer!";
6708 			errorText_ = errorStream_.str();
6709 			goto unlock;
6710 		}
6711 	}
6712 
6713 	handle->drainCounter = 0;
6714 	handle->internalDrain = false;
6715 	ResetEvent(handle->condition);
6716 	stream_.state = STREAM_RUNNING;
6717 
6718 unlock:
6719 	if (FAILED(result)) error(RtAudioError::SYSTEM_ERROR);
6720 }
6721 
// Stop the stream.  For playback, first wait (via handle->condition) for the
// callback thread to finish draining queued output, then stop the DS
// buffer(s) and zero their contents so a subsequent restart does not replay
// stale audio.  The statement order here (drain -> set state -> lock mutex ->
// stop/clear) and the mutex-lock asymmetry between the two branches are
// load-bearing; see notes below.
void RtApiDs ::stopStream()
{
	verifyStream();
	if (stream_.state == STREAM_STOPPED)
	{
		errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
		error(RtAudioError::WARNING);
		return;
	}

	HRESULT result = 0;
	LPVOID audioPtr;
	DWORD dataLen;
	DsHandle *handle = (DsHandle *)stream_.apiHandle;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// Request a drain unless one is already in progress (abortStream()
		// pre-sets drainCounter to 2 to skip the wait).  The callback thread
		// signals handle->condition once the drain completes.
		if (handle->drainCounter == 0)
		{
			handle->drainCounter = 2;
			WaitForSingleObject(handle->condition, INFINITE);  // block until signaled
		}

		stream_.state = STREAM_STOPPED;

		MUTEX_LOCK(&stream_.mutex);

		// Stop the buffer and clear memory
		LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
		result = buffer->Stop();
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") stopping output buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// Lock the buffer and clear it so that if we start to play again,
		// we won't have old data playing.
		result = buffer->Lock(0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0);
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") locking output buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// Zero the DS buffer
		ZeroMemory(audioPtr, dataLen);

		// Unlock the DS buffer
		result = buffer->Unlock(audioPtr, dataLen, NULL, 0);
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") unlocking output buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// If we start playing again, we must begin at beginning of buffer.
		handle->bufferPointer[0] = 0;
	}

	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
	{
		LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
		audioPtr = NULL;
		dataLen = 0;

		stream_.state = STREAM_STOPPED;

		// In DUPLEX mode the mutex was already taken in the output branch
		// above; lock it here only for an INPUT-only stream (the single
		// MUTEX_UNLOCK at 'unlock' below balances either path).
		if (stream_.mode != DUPLEX)
			MUTEX_LOCK(&stream_.mutex);

		result = buffer->Stop();
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") stopping input buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// Lock the buffer and clear it so that if we start to play again,
		// we won't have old data playing.
		result = buffer->Lock(0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0);
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") locking input buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// Zero the DS buffer
		ZeroMemory(audioPtr, dataLen);

		// Unlock the DS buffer
		result = buffer->Unlock(audioPtr, dataLen, NULL, 0);
		if (FAILED(result))
		{
			errorStream_ << "RtApiDs::stopStream: error (" << getErrorString(result) << ") unlocking input buffer!";
			errorText_ = errorStream_.str();
			goto unlock;
		}

		// If we start recording again, we must begin at beginning of buffer.
		handle->bufferPointer[1] = 0;
	}

unlock:
	timeEndPeriod(1);  // revert to normal scheduler frequency on lesser windows.
	MUTEX_UNLOCK(&stream_.mutex);

	if (FAILED(result)) error(RtAudioError::SYSTEM_ERROR);
}
6835 
abortStream()6836 void RtApiDs ::abortStream()
6837 {
6838 	verifyStream();
6839 	if (stream_.state == STREAM_STOPPED)
6840 	{
6841 		errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6842 		error(RtAudioError::WARNING);
6843 		return;
6844 	}
6845 
6846 	DsHandle *handle = (DsHandle *)stream_.apiHandle;
6847 	handle->drainCounter = 2;
6848 
6849 	stopStream();
6850 }
6851 
callbackEvent()6852 void RtApiDs ::callbackEvent()
6853 {
6854 	if (stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING)
6855 	{
6856 		Sleep(50);  // sleep 50 milliseconds
6857 		return;
6858 	}
6859 
6860 	if (stream_.state == STREAM_CLOSED)
6861 	{
6862 		errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6863 		error(RtAudioError::WARNING);
6864 		return;
6865 	}
6866 
6867 	CallbackInfo *info = (CallbackInfo *)&stream_.callbackInfo;
6868 	DsHandle *handle = (DsHandle *)stream_.apiHandle;
6869 
6870 	// Check if we were draining the stream and signal is finished.
6871 	if (handle->drainCounter > stream_.nBuffers + 2)
6872 	{
6873 		stream_.state = STREAM_STOPPING;
6874 		if (handle->internalDrain == false)
6875 			SetEvent(handle->condition);
6876 		else
6877 			stopStream();
6878 		return;
6879 	}
6880 
6881 	// Invoke user callback to get fresh output data UNLESS we are
6882 	// draining stream.
6883 	if (handle->drainCounter == 0)
6884 	{
6885 		RtAudioCallback callback = (RtAudioCallback)info->callback;
6886 		double streamTime = getStreamTime();
6887 		RtAudioStreamStatus status = 0;
6888 		if (stream_.mode != INPUT && handle->xrun[0] == true)
6889 		{
6890 			status |= RTAUDIO_OUTPUT_UNDERFLOW;
6891 			handle->xrun[0] = false;
6892 		}
6893 		if (stream_.mode != OUTPUT && handle->xrun[1] == true)
6894 		{
6895 			status |= RTAUDIO_INPUT_OVERFLOW;
6896 			handle->xrun[1] = false;
6897 		}
6898 		int cbReturnValue = callback(stream_.userBuffer[0], stream_.userBuffer[1],
6899 									 stream_.bufferSize, streamTime, status, info->userData);
6900 		if (cbReturnValue == 2)
6901 		{
6902 			stream_.state = STREAM_STOPPING;
6903 			handle->drainCounter = 2;
6904 			abortStream();
6905 			return;
6906 		}
6907 		else if (cbReturnValue == 1)
6908 		{
6909 			handle->drainCounter = 1;
6910 			handle->internalDrain = true;
6911 		}
6912 	}
6913 
6914 	HRESULT result;
6915 	DWORD currentWritePointer, safeWritePointer;
6916 	DWORD currentReadPointer, safeReadPointer;
6917 	UINT nextWritePointer;
6918 
6919 	LPVOID buffer1 = NULL;
6920 	LPVOID buffer2 = NULL;
6921 	DWORD bufferSize1 = 0;
6922 	DWORD bufferSize2 = 0;
6923 
6924 	char *buffer;
6925 	long bufferBytes;
6926 
6927 	MUTEX_LOCK(&stream_.mutex);
6928 	if (stream_.state == STREAM_STOPPED)
6929 	{
6930 		MUTEX_UNLOCK(&stream_.mutex);
6931 		return;
6932 	}
6933 
6934 	if (buffersRolling == false)
6935 	{
6936 		if (stream_.mode == DUPLEX)
6937 		{
6938 			//assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6939 
6940 			// It takes a while for the devices to get rolling. As a result,
6941 			// there's no guarantee that the capture and write device pointers
6942 			// will move in lockstep.  Wait here for both devices to start
6943 			// rolling, and then set our buffer pointers accordingly.
6944 			// e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6945 			// bytes later than the write buffer.
6946 
6947 			// Stub: a serious risk of having a pre-emptive scheduling round
6948 			// take place between the two GetCurrentPosition calls... but I'm
6949 			// really not sure how to solve the problem.  Temporarily boost to
6950 			// Realtime priority, maybe; but I'm not sure what priority the
6951 			// DirectSound service threads run at. We *should* be roughly
6952 			// within a ms or so of correct.
6953 
6954 			LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
6955 			LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
6956 
6957 			DWORD startSafeWritePointer, startSafeReadPointer;
6958 
6959 			result = dsWriteBuffer->GetCurrentPosition(NULL, &startSafeWritePointer);
6960 			if (FAILED(result))
6961 			{
6962 				errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current write position!";
6963 				errorText_ = errorStream_.str();
6964 				MUTEX_UNLOCK(&stream_.mutex);
6965 				error(RtAudioError::SYSTEM_ERROR);
6966 				return;
6967 			}
6968 			result = dsCaptureBuffer->GetCurrentPosition(NULL, &startSafeReadPointer);
6969 			if (FAILED(result))
6970 			{
6971 				errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current read position!";
6972 				errorText_ = errorStream_.str();
6973 				MUTEX_UNLOCK(&stream_.mutex);
6974 				error(RtAudioError::SYSTEM_ERROR);
6975 				return;
6976 			}
6977 			while (true)
6978 			{
6979 				result = dsWriteBuffer->GetCurrentPosition(NULL, &safeWritePointer);
6980 				if (FAILED(result))
6981 				{
6982 					errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current write position!";
6983 					errorText_ = errorStream_.str();
6984 					MUTEX_UNLOCK(&stream_.mutex);
6985 					error(RtAudioError::SYSTEM_ERROR);
6986 					return;
6987 				}
6988 				result = dsCaptureBuffer->GetCurrentPosition(NULL, &safeReadPointer);
6989 				if (FAILED(result))
6990 				{
6991 					errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current read position!";
6992 					errorText_ = errorStream_.str();
6993 					MUTEX_UNLOCK(&stream_.mutex);
6994 					error(RtAudioError::SYSTEM_ERROR);
6995 					return;
6996 				}
6997 				if (safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer) break;
6998 				Sleep(1);
6999 			}
7000 
7001 			//assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
7002 
7003 			handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
7004 			if (handle->bufferPointer[0] >= handle->dsBufferSize[0]) handle->bufferPointer[0] -= handle->dsBufferSize[0];
7005 			handle->bufferPointer[1] = safeReadPointer;
7006 		}
7007 		else if (stream_.mode == OUTPUT)
7008 		{
7009 			// Set the proper nextWritePosition after initial startup.
7010 			LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
7011 			result = dsWriteBuffer->GetCurrentPosition(&currentWritePointer, &safeWritePointer);
7012 			if (FAILED(result))
7013 			{
7014 				errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current write position!";
7015 				errorText_ = errorStream_.str();
7016 				MUTEX_UNLOCK(&stream_.mutex);
7017 				error(RtAudioError::SYSTEM_ERROR);
7018 				return;
7019 			}
7020 			handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
7021 			if (handle->bufferPointer[0] >= handle->dsBufferSize[0]) handle->bufferPointer[0] -= handle->dsBufferSize[0];
7022 		}
7023 
7024 		buffersRolling = true;
7025 	}
7026 
7027 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
7028 	{
7029 		LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER)handle->buffer[0];
7030 
7031 		if (handle->drainCounter > 1)
7032 		{  // write zeros to the output stream
7033 			bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
7034 			bufferBytes *= formatBytes(stream_.userFormat);
7035 			memset(stream_.userBuffer[0], 0, bufferBytes);
7036 		}
7037 
7038 		// Setup parameters and do buffer conversion if necessary.
7039 		if (stream_.doConvertBuffer[0])
7040 		{
7041 			buffer = stream_.deviceBuffer;
7042 			convertBuffer(buffer, stream_.userBuffer[0], stream_.convertInfo[0]);
7043 			bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
7044 			bufferBytes *= formatBytes(stream_.deviceFormat[0]);
7045 		}
7046 		else
7047 		{
7048 			buffer = stream_.userBuffer[0];
7049 			bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
7050 			bufferBytes *= formatBytes(stream_.userFormat);
7051 		}
7052 
7053 		// No byte swapping necessary in DirectSound implementation.
7054 
7055 		// Ahhh ... windoze.  16-bit data is signed but 8-bit data is
7056 		// unsigned.  So, we need to convert our signed 8-bit data here to
7057 		// unsigned.
7058 		if (stream_.deviceFormat[0] == RTAUDIO_SINT8)
7059 			for (int i = 0; i < bufferBytes; i++) buffer[i] = (unsigned char)(buffer[i] + 128);
7060 
7061 		DWORD dsBufferSize = handle->dsBufferSize[0];
7062 		nextWritePointer = handle->bufferPointer[0];
7063 
7064 		DWORD endWrite, leadPointer;
7065 		while (true)
7066 		{
7067 			// Find out where the read and "safe write" pointers are.
7068 			result = dsBuffer->GetCurrentPosition(&currentWritePointer, &safeWritePointer);
7069 			if (FAILED(result))
7070 			{
7071 				errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current write position!";
7072 				errorText_ = errorStream_.str();
7073 				MUTEX_UNLOCK(&stream_.mutex);
7074 				error(RtAudioError::SYSTEM_ERROR);
7075 				return;
7076 			}
7077 
7078 			// We will copy our output buffer into the region between
7079 			// safeWritePointer and leadPointer.  If leadPointer is not
7080 			// beyond the next endWrite position, wait until it is.
7081 			leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
7082 			//std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
7083 			if (leadPointer > dsBufferSize) leadPointer -= dsBufferSize;
7084 			if (leadPointer < nextWritePointer) leadPointer += dsBufferSize;  // unwrap offset
7085 			endWrite = nextWritePointer + bufferBytes;
7086 
7087 			// Check whether the entire write region is behind the play pointer.
7088 			if (leadPointer >= endWrite) break;
7089 
7090 			// If we are here, then we must wait until the leadPointer advances
7091 			// beyond the end of our next write region. We use the
7092 			// Sleep() function to suspend operation until that happens.
7093 			double millis = (endWrite - leadPointer) * 1000.0;
7094 			millis /= (formatBytes(stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
7095 			if (millis < 1.0) millis = 1.0;
7096 			Sleep((DWORD)millis);
7097 		}
7098 
7099 		if (dsPointerBetween(nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize) || dsPointerBetween(endWrite, safeWritePointer, currentWritePointer, dsBufferSize))
7100 		{
7101 			// We've strayed into the forbidden zone ... resync the read pointer.
7102 			handle->xrun[0] = true;
7103 			nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
7104 			if (nextWritePointer >= dsBufferSize) nextWritePointer -= dsBufferSize;
7105 			handle->bufferPointer[0] = nextWritePointer;
7106 			endWrite = nextWritePointer + bufferBytes;
7107 		}
7108 
7109 		// Lock free space in the buffer
7110 		result = dsBuffer->Lock(nextWritePointer, bufferBytes, &buffer1,
7111 								&bufferSize1, &buffer2, &bufferSize2, 0);
7112 		if (FAILED(result))
7113 		{
7114 			errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") locking buffer during playback!";
7115 			errorText_ = errorStream_.str();
7116 			MUTEX_UNLOCK(&stream_.mutex);
7117 			error(RtAudioError::SYSTEM_ERROR);
7118 			return;
7119 		}
7120 
7121 		// Copy our buffer into the DS buffer
7122 		CopyMemory(buffer1, buffer, bufferSize1);
7123 		if (buffer2 != NULL) CopyMemory(buffer2, buffer + bufferSize1, bufferSize2);
7124 
7125 		// Update our buffer offset and unlock sound buffer
7126 		dsBuffer->Unlock(buffer1, bufferSize1, buffer2, bufferSize2);
7127 		if (FAILED(result))
7128 		{
7129 			errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") unlocking buffer during playback!";
7130 			errorText_ = errorStream_.str();
7131 			MUTEX_UNLOCK(&stream_.mutex);
7132 			error(RtAudioError::SYSTEM_ERROR);
7133 			return;
7134 		}
7135 		nextWritePointer = (nextWritePointer + bufferSize1 + bufferSize2) % dsBufferSize;
7136 		handle->bufferPointer[0] = nextWritePointer;
7137 	}
7138 
7139 	// Don't bother draining input
7140 	if (handle->drainCounter)
7141 	{
7142 		handle->drainCounter++;
7143 		goto unlock;
7144 	}
7145 
7146 	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
7147 	{
7148 		// Setup parameters.
7149 		if (stream_.doConvertBuffer[1])
7150 		{
7151 			buffer = stream_.deviceBuffer;
7152 			bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
7153 			bufferBytes *= formatBytes(stream_.deviceFormat[1]);
7154 		}
7155 		else
7156 		{
7157 			buffer = stream_.userBuffer[1];
7158 			bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
7159 			bufferBytes *= formatBytes(stream_.userFormat);
7160 		}
7161 
7162 		LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER)handle->buffer[1];
7163 		long nextReadPointer = handle->bufferPointer[1];
7164 		DWORD dsBufferSize = handle->dsBufferSize[1];
7165 
7166 		// Find out where the write and "safe read" pointers are.
7167 		result = dsBuffer->GetCurrentPosition(&currentReadPointer, &safeReadPointer);
7168 		if (FAILED(result))
7169 		{
7170 			errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current read position!";
7171 			errorText_ = errorStream_.str();
7172 			MUTEX_UNLOCK(&stream_.mutex);
7173 			error(RtAudioError::SYSTEM_ERROR);
7174 			return;
7175 		}
7176 
7177 		if (safeReadPointer < (DWORD)nextReadPointer) safeReadPointer += dsBufferSize;  // unwrap offset
7178 		DWORD endRead = nextReadPointer + bufferBytes;
7179 
7180 		// Handling depends on whether we are INPUT or DUPLEX.
7181 		// If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
7182 		// then a wait here will drag the write pointers into the forbidden zone.
7183 		//
7184 		// In DUPLEX mode, rather than wait, we will back off the read pointer until
7185 		// it's in a safe position. This causes dropouts, but it seems to be the only
		// practical way to sync up the read and write pointers reliably, given
		// the very complex relationship between phase and increment of the read and write
7188 		// pointers.
7189 		//
7190 		// In order to minimize audible dropouts in DUPLEX mode, we will
7191 		// provide a pre-roll period of 0.5 seconds in which we return
7192 		// zeros from the read buffer while the pointers sync up.
7193 
7194 		if (stream_.mode == DUPLEX)
7195 		{
7196 			if (safeReadPointer < endRead)
7197 			{
7198 				if (duplexPrerollBytes <= 0)
7199 				{
					// Pre-roll time over. Be more aggressive.
7201 					int adjustment = endRead - safeReadPointer;
7202 
7203 					handle->xrun[1] = true;
7204 					// Two cases:
7205 					//   - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
7206 					//     and perform fine adjustments later.
7207 					//   - small adjustments: back off by twice as much.
7208 					if (adjustment >= 2 * bufferBytes)
7209 						nextReadPointer = safeReadPointer - 2 * bufferBytes;
7210 					else
7211 						nextReadPointer = safeReadPointer - bufferBytes - adjustment;
7212 
7213 					if (nextReadPointer < 0) nextReadPointer += dsBufferSize;
7214 				}
7215 				else
7216 				{
					// In pre-roll time. Just do it.
7218 					nextReadPointer = safeReadPointer - bufferBytes;
7219 					while (nextReadPointer < 0) nextReadPointer += dsBufferSize;
7220 				}
7221 				endRead = nextReadPointer + bufferBytes;
7222 			}
7223 		}
7224 		else
7225 		{  // mode == INPUT
7226 			while (safeReadPointer < endRead && stream_.callbackInfo.isRunning)
7227 			{
7228 				// See comments for playback.
7229 				double millis = (endRead - safeReadPointer) * 1000.0;
7230 				millis /= (formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
7231 				if (millis < 1.0) millis = 1.0;
7232 				Sleep((DWORD)millis);
7233 
7234 				// Wake up and find out where we are now.
7235 				result = dsBuffer->GetCurrentPosition(&currentReadPointer, &safeReadPointer);
7236 				if (FAILED(result))
7237 				{
7238 					errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") getting current read position!";
7239 					errorText_ = errorStream_.str();
7240 					MUTEX_UNLOCK(&stream_.mutex);
7241 					error(RtAudioError::SYSTEM_ERROR);
7242 					return;
7243 				}
7244 
7245 				if (safeReadPointer < (DWORD)nextReadPointer) safeReadPointer += dsBufferSize;  // unwrap offset
7246 			}
7247 		}
7248 
7249 		// Lock free space in the buffer
7250 		result = dsBuffer->Lock(nextReadPointer, bufferBytes, &buffer1,
7251 								&bufferSize1, &buffer2, &bufferSize2, 0);
7252 		if (FAILED(result))
7253 		{
7254 			errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") locking capture buffer!";
7255 			errorText_ = errorStream_.str();
7256 			MUTEX_UNLOCK(&stream_.mutex);
7257 			error(RtAudioError::SYSTEM_ERROR);
7258 			return;
7259 		}
7260 
7261 		if (duplexPrerollBytes <= 0)
7262 		{
7263 			// Copy our buffer into the DS buffer
7264 			CopyMemory(buffer, buffer1, bufferSize1);
7265 			if (buffer2 != NULL) CopyMemory(buffer + bufferSize1, buffer2, bufferSize2);
7266 		}
7267 		else
7268 		{
7269 			memset(buffer, 0, bufferSize1);
7270 			if (buffer2 != NULL) memset(buffer + bufferSize1, 0, bufferSize2);
7271 			duplexPrerollBytes -= bufferSize1 + bufferSize2;
7272 		}
7273 
7274 		// Update our buffer offset and unlock sound buffer
7275 		nextReadPointer = (nextReadPointer + bufferSize1 + bufferSize2) % dsBufferSize;
7276 		dsBuffer->Unlock(buffer1, bufferSize1, buffer2, bufferSize2);
7277 		if (FAILED(result))
7278 		{
7279 			errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString(result) << ") unlocking capture buffer!";
7280 			errorText_ = errorStream_.str();
7281 			MUTEX_UNLOCK(&stream_.mutex);
7282 			error(RtAudioError::SYSTEM_ERROR);
7283 			return;
7284 		}
7285 		handle->bufferPointer[1] = nextReadPointer;
7286 
7287 		// No byte swapping necessary in DirectSound implementation.
7288 
7289 		// If necessary, convert 8-bit data from unsigned to signed.
7290 		if (stream_.deviceFormat[1] == RTAUDIO_SINT8)
7291 			for (int j = 0; j < bufferBytes; j++) buffer[j] = (signed char)(buffer[j] - 128);
7292 
7293 		// Do buffer conversion if necessary.
7294 		if (stream_.doConvertBuffer[1])
7295 			convertBuffer(stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1]);
7296 	}
7297 
7298 unlock:
7299 	MUTEX_UNLOCK(&stream_.mutex);
7300 	RtApi::tickStreamTime();
7301 }
7302 
7303 // Definitions for utility functions and callbacks
7304 // specific to the DirectSound implementation.
7305 
callbackHandler(void * ptr)7306 static unsigned __stdcall callbackHandler(void *ptr)
7307 {
7308 	CallbackInfo *info = (CallbackInfo *)ptr;
7309 	RtApiDs *object = (RtApiDs *)info->object;
7310 	bool *isRunning = &info->isRunning;
7311 
7312 	while (*isRunning == true)
7313 	{
7314 		object->callbackEvent();
7315 	}
7316 
7317 	_endthreadex(0);
7318 	return 0;
7319 }
7320 
// DirectSound(Capture)Enumerate callback, invoked once per system device.
// Probes the device for basic usability in the direction selected by the
// DsProbeData carried in lpContext (isInput), and, if usable, records its
// name and GUID in the shared DsDevice list.  Always returns TRUE so that
// enumeration continues with the next device.
static BOOL CALLBACK deviceQueryCallback(LPGUID lpguid,
										 LPCTSTR description,
										 LPCTSTR /*module*/,
										 LPVOID lpContext)
{
	struct DsProbeData &probeInfo = *(struct DsProbeData *)lpContext;
	std::vector<struct DsDevice> &dsDevices = *probeInfo.dsDevices;

	HRESULT hr;
	bool validDevice = false;
	if (probeInfo.isInput == true)
	{
		// Capture probe: device is usable if it reports at least one
		// channel and at least one supported capture format.
		DSCCAPS caps;
		LPDIRECTSOUNDCAPTURE object;

		hr = DirectSoundCaptureCreate(lpguid, &object, NULL);
		if (hr != DS_OK) return TRUE;  // can't open this device ... skip it, keep enumerating

		caps.dwSize = sizeof(caps);
		hr = object->GetCaps(&caps);
		if (hr == DS_OK)
		{
			if (caps.dwChannels > 0 && caps.dwFormats > 0)
				validDevice = true;
		}
		object->Release();
	}
	else
	{
		// Playback probe: require primary mono or stereo buffer capability.
		DSCAPS caps;
		LPDIRECTSOUND object;
		hr = DirectSoundCreate(lpguid, &object, NULL);
		if (hr != DS_OK) return TRUE;  // can't open this device ... skip it, keep enumerating

		caps.dwSize = sizeof(caps);
		hr = object->GetCaps(&caps);
		if (hr == DS_OK)
		{
			if (caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO)
				validDevice = true;
		}
		object->Release();
	}

	// If good device, then save its name and guid.
	std::string name = convertCharPointerToStdString(description);
	//if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
	// A NULL guid denotes the system default device.
	if (lpguid == NULL)
		name = "Default Device";
	if (validDevice)
	{
		// If this name was already recorded (e.g. during the opposite-direction
		// enumeration pass), just attach this direction's GUID to that entry.
		// Slot convention: id/validId index 0 = output, 1 = input.
		for (unsigned int i = 0; i < dsDevices.size(); i++)
		{
			if (dsDevices[i].name == name)
			{
				dsDevices[i].found = true;
				if (probeInfo.isInput)
				{
					dsDevices[i].id[1] = lpguid;
					dsDevices[i].validId[1] = true;
				}
				else
				{
					dsDevices[i].id[0] = lpguid;
					dsDevices[i].validId[0] = true;
				}
				return TRUE;
			}
		}

		// First time this device is seen ... create a new list entry.
		DsDevice device;
		device.name = name;
		device.found = true;
		if (probeInfo.isInput)
		{
			device.id[1] = lpguid;
			device.validId[1] = true;
		}
		else
		{
			device.id[0] = lpguid;
			device.validId[0] = true;
		}
		dsDevices.push_back(device);
	}

	return TRUE;
}
7409 
getErrorString(int code)7410 static const char *getErrorString(int code)
7411 {
7412 	switch (code)
7413 	{
7414 		case DSERR_ALLOCATED:
7415 			return "Already allocated";
7416 
7417 		case DSERR_CONTROLUNAVAIL:
7418 			return "Control unavailable";
7419 
7420 		case DSERR_INVALIDPARAM:
7421 			return "Invalid parameter";
7422 
7423 		case DSERR_INVALIDCALL:
7424 			return "Invalid call";
7425 
7426 		case DSERR_GENERIC:
7427 			return "Generic error";
7428 
7429 		case DSERR_PRIOLEVELNEEDED:
7430 			return "Priority level needed";
7431 
7432 		case DSERR_OUTOFMEMORY:
7433 			return "Out of memory";
7434 
7435 		case DSERR_BADFORMAT:
7436 			return "The sample rate or the channel format is not supported";
7437 
7438 		case DSERR_UNSUPPORTED:
7439 			return "Not supported";
7440 
7441 		case DSERR_NODRIVER:
7442 			return "No driver";
7443 
7444 		case DSERR_ALREADYINITIALIZED:
7445 			return "Already initialized";
7446 
7447 		case DSERR_NOAGGREGATION:
7448 			return "No aggregation";
7449 
7450 		case DSERR_BUFFERLOST:
7451 			return "Buffer lost";
7452 
7453 		case DSERR_OTHERAPPHASPRIO:
7454 			return "Another application already has priority";
7455 
7456 		case DSERR_UNINITIALIZED:
7457 			return "Uninitialized";
7458 
7459 		default:
7460 			return "DirectSound unknown error";
7461 	}
7462 }
7463 //******************** End of __WINDOWS_DS__ *********************//
7464 #endif
7465 
7466 #if defined(__LINUX_ALSA__)
7467 
7468 #include <alsa/asoundlib.h>
7469 #include <unistd.h>
7470 
7471 // A structure to hold various information related to the ALSA API
7472 // implementation.
7473 struct AlsaHandle
7474 {
7475 	snd_pcm_t *handles[2];
7476 	bool synchronized;
7477 	bool xrun[2];
7478 	pthread_cond_t runnable_cv;
7479 	bool runnable;
7480 
AlsaHandleAlsaHandle7481 	AlsaHandle()
7482 		: synchronized(false), runnable(false)
7483 	{
7484 		xrun[0] = false;
7485 		xrun[1] = false;
7486 	}
7487 };
7488 
7489 static void *alsaCallbackHandler(void *ptr);
7490 
RtApiAlsa()7491 RtApiAlsa ::RtApiAlsa()
7492 {
7493 	// Nothing to do here.
7494 }
7495 
~RtApiAlsa()7496 RtApiAlsa ::~RtApiAlsa()
7497 {
7498 	if (stream_.state != STREAM_CLOSED) closeStream();
7499 }
7500 
getDeviceCount(void)7501 unsigned int RtApiAlsa ::getDeviceCount(void)
7502 {
7503 	unsigned nDevices = 0;
7504 	int result, subdevice, card;
7505 	char name[64];
7506 	snd_ctl_t *handle;
7507 
7508 	// Count cards and devices
7509 	card = -1;
7510 	snd_card_next(&card);
7511 	while (card >= 0)
7512 	{
7513 		sprintf(name, "hw:%d", card);
7514 		result = snd_ctl_open(&handle, name, 0);
7515 		if (result < 0)
7516 		{
7517 			errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror(result) << ".";
7518 			errorText_ = errorStream_.str();
7519 			error(RtAudioError::WARNING);
7520 			goto nextcard;
7521 		}
7522 		subdevice = -1;
7523 		while (1)
7524 		{
7525 			result = snd_ctl_pcm_next_device(handle, &subdevice);
7526 			if (result < 0)
7527 			{
7528 				errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror(result) << ".";
7529 				errorText_ = errorStream_.str();
7530 				error(RtAudioError::WARNING);
7531 				break;
7532 			}
7533 			if (subdevice < 0)
7534 				break;
7535 			nDevices++;
7536 		}
7537 	nextcard:
7538 		snd_ctl_close(handle);
7539 		snd_card_next(&card);
7540 	}
7541 
7542 	result = snd_ctl_open(&handle, "default", 0);
7543 	if (result == 0)
7544 	{
7545 		nDevices++;
7546 		snd_ctl_close(handle);
7547 	}
7548 
7549 	return nDevices;
7550 }
7551 
getDeviceInfo(unsigned int device)7552 RtAudio::DeviceInfo RtApiAlsa ::getDeviceInfo(unsigned int device)
7553 {
7554 	RtAudio::DeviceInfo info;
7555 	info.probed = false;
7556 
7557 	unsigned nDevices = 0;
7558 	int result, subdevice, card;
7559 	char name[64];
7560 	snd_ctl_t *chandle;
7561 
7562 	// Count cards and devices
7563 	card = -1;
7564 	subdevice = -1;
7565 	snd_card_next(&card);
7566 	while (card >= 0)
7567 	{
7568 		sprintf(name, "hw:%d", card);
7569 		result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK);
7570 		if (result < 0)
7571 		{
7572 			errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror(result) << ".";
7573 			errorText_ = errorStream_.str();
7574 			error(RtAudioError::WARNING);
7575 			goto nextcard;
7576 		}
7577 		subdevice = -1;
7578 		while (1)
7579 		{
7580 			result = snd_ctl_pcm_next_device(chandle, &subdevice);
7581 			if (result < 0)
7582 			{
7583 				errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror(result) << ".";
7584 				errorText_ = errorStream_.str();
7585 				error(RtAudioError::WARNING);
7586 				break;
7587 			}
7588 			if (subdevice < 0) break;
7589 			if (nDevices == device)
7590 			{
7591 				sprintf(name, "hw:%d,%d", card, subdevice);
7592 				goto foundDevice;
7593 			}
7594 			nDevices++;
7595 		}
7596 	nextcard:
7597 		snd_ctl_close(chandle);
7598 		snd_card_next(&card);
7599 	}
7600 
7601 	result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK);
7602 	if (result == 0)
7603 	{
7604 		if (nDevices == device)
7605 		{
7606 			strcpy(name, "default");
7607 			goto foundDevice;
7608 		}
7609 		nDevices++;
7610 	}
7611 
7612 	if (nDevices == 0)
7613 	{
7614 		errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7615 		error(RtAudioError::INVALID_USE);
7616 		return info;
7617 	}
7618 
7619 	if (device >= nDevices)
7620 	{
7621 		errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7622 		error(RtAudioError::INVALID_USE);
7623 		return info;
7624 	}
7625 
7626 foundDevice:
7627 
7628 	// If a stream is already open, we cannot probe the stream devices.
7629 	// Thus, use the saved results.
7630 	if (stream_.state != STREAM_CLOSED &&
7631 		(stream_.device[0] == device || stream_.device[1] == device))
7632 	{
7633 		snd_ctl_close(chandle);
7634 		if (device >= devices_.size())
7635 		{
7636 			errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7637 			error(RtAudioError::WARNING);
7638 			return info;
7639 		}
7640 		return devices_[device];
7641 	}
7642 
7643 	int openMode = SND_PCM_ASYNC;
7644 	snd_pcm_stream_t stream;
7645 	snd_pcm_info_t *pcminfo;
7646 	snd_pcm_info_alloca(&pcminfo);
7647 	snd_pcm_t *phandle;
7648 	snd_pcm_hw_params_t *params;
7649 	snd_pcm_hw_params_alloca(&params);
7650 
7651 	// First try for playback unless default device (which has subdev -1)
7652 	stream = SND_PCM_STREAM_PLAYBACK;
7653 	snd_pcm_info_set_stream(pcminfo, stream);
7654 	if (subdevice != -1)
7655 	{
7656 		snd_pcm_info_set_device(pcminfo, subdevice);
7657 		snd_pcm_info_set_subdevice(pcminfo, 0);
7658 
7659 		result = snd_ctl_pcm_info(chandle, pcminfo);
7660 		if (result < 0)
7661 		{
7662 			// Device probably doesn't support playback.
7663 			goto captureProbe;
7664 		}
7665 	}
7666 
7667 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7668 	if (result < 0)
7669 	{
7670 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".";
7671 		errorText_ = errorStream_.str();
7672 		error(RtAudioError::WARNING);
7673 		goto captureProbe;
7674 	}
7675 
7676 	// The device is open ... fill the parameter structure.
7677 	result = snd_pcm_hw_params_any(phandle, params);
7678 	if (result < 0)
7679 	{
7680 		snd_pcm_close(phandle);
7681 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".";
7682 		errorText_ = errorStream_.str();
7683 		error(RtAudioError::WARNING);
7684 		goto captureProbe;
7685 	}
7686 
7687 	// Get output channel information.
7688 	unsigned int value;
7689 	result = snd_pcm_hw_params_get_channels_max(params, &value);
7690 	if (result < 0)
7691 	{
7692 		snd_pcm_close(phandle);
7693 		errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror(result) << ".";
7694 		errorText_ = errorStream_.str();
7695 		error(RtAudioError::WARNING);
7696 		goto captureProbe;
7697 	}
7698 	info.outputChannels = value;
7699 	snd_pcm_close(phandle);
7700 
7701 captureProbe:
7702 	stream = SND_PCM_STREAM_CAPTURE;
7703 	snd_pcm_info_set_stream(pcminfo, stream);
7704 
7705 	// Now try for capture unless default device (with subdev = -1)
7706 	if (subdevice != -1)
7707 	{
7708 		result = snd_ctl_pcm_info(chandle, pcminfo);
7709 		snd_ctl_close(chandle);
7710 		if (result < 0)
7711 		{
7712 			// Device probably doesn't support capture.
7713 			if (info.outputChannels == 0) return info;
7714 			goto probeParameters;
7715 		}
7716 	}
7717 	else
7718 		snd_ctl_close(chandle);
7719 
7720 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7721 	if (result < 0)
7722 	{
7723 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".";
7724 		errorText_ = errorStream_.str();
7725 		error(RtAudioError::WARNING);
7726 		if (info.outputChannels == 0) return info;
7727 		goto probeParameters;
7728 	}
7729 
7730 	// The device is open ... fill the parameter structure.
7731 	result = snd_pcm_hw_params_any(phandle, params);
7732 	if (result < 0)
7733 	{
7734 		snd_pcm_close(phandle);
7735 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".";
7736 		errorText_ = errorStream_.str();
7737 		error(RtAudioError::WARNING);
7738 		if (info.outputChannels == 0) return info;
7739 		goto probeParameters;
7740 	}
7741 
7742 	result = snd_pcm_hw_params_get_channels_max(params, &value);
7743 	if (result < 0)
7744 	{
7745 		snd_pcm_close(phandle);
7746 		errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror(result) << ".";
7747 		errorText_ = errorStream_.str();
7748 		error(RtAudioError::WARNING);
7749 		if (info.outputChannels == 0) return info;
7750 		goto probeParameters;
7751 	}
7752 	info.inputChannels = value;
7753 	snd_pcm_close(phandle);
7754 
7755 	// If device opens for both playback and capture, we determine the channels.
7756 	if (info.outputChannels > 0 && info.inputChannels > 0)
7757 		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7758 
7759 	// ALSA doesn't provide default devices so we'll use the first available one.
7760 	if (device == 0 && info.outputChannels > 0)
7761 		info.isDefaultOutput = true;
7762 	if (device == 0 && info.inputChannels > 0)
7763 		info.isDefaultInput = true;
7764 
7765 probeParameters:
7766 	// At this point, we just need to figure out the supported data
7767 	// formats and sample rates.  We'll proceed by opening the device in
7768 	// the direction with the maximum number of channels, or playback if
7769 	// they are equal.  This might limit our sample rate options, but so
7770 	// be it.
7771 
7772 	if (info.outputChannels >= info.inputChannels)
7773 		stream = SND_PCM_STREAM_PLAYBACK;
7774 	else
7775 		stream = SND_PCM_STREAM_CAPTURE;
7776 	snd_pcm_info_set_stream(pcminfo, stream);
7777 
7778 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7779 	if (result < 0)
7780 	{
7781 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".";
7782 		errorText_ = errorStream_.str();
7783 		error(RtAudioError::WARNING);
7784 		return info;
7785 	}
7786 
7787 	// The device is open ... fill the parameter structure.
7788 	result = snd_pcm_hw_params_any(phandle, params);
7789 	if (result < 0)
7790 	{
7791 		snd_pcm_close(phandle);
7792 		errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".";
7793 		errorText_ = errorStream_.str();
7794 		error(RtAudioError::WARNING);
7795 		return info;
7796 	}
7797 
7798 	// Test our discrete set of sample rate values.
7799 	info.sampleRates.clear();
7800 	for (unsigned int i = 0; i < MAX_SAMPLE_RATES; i++)
7801 	{
7802 		if (snd_pcm_hw_params_test_rate(phandle, params, SAMPLE_RATES[i], 0) == 0)
7803 		{
7804 			info.sampleRates.push_back(SAMPLE_RATES[i]);
7805 
7806 			if (!info.preferredSampleRate || (SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate))
7807 				info.preferredSampleRate = SAMPLE_RATES[i];
7808 		}
7809 	}
7810 	if (info.sampleRates.size() == 0)
7811 	{
7812 		snd_pcm_close(phandle);
7813 		errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7814 		errorText_ = errorStream_.str();
7815 		error(RtAudioError::WARNING);
7816 		return info;
7817 	}
7818 
7819 	// Probe the supported data formats ... we don't care about endian-ness just yet
7820 	snd_pcm_format_t format;
7821 	info.nativeFormats = 0;
7822 	format = SND_PCM_FORMAT_S8;
7823 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7824 		info.nativeFormats |= RTAUDIO_SINT8;
7825 	format = SND_PCM_FORMAT_S16;
7826 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7827 		info.nativeFormats |= RTAUDIO_SINT16;
7828 	format = SND_PCM_FORMAT_S24;
7829 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7830 		info.nativeFormats |= RTAUDIO_SINT24;
7831 	format = SND_PCM_FORMAT_S32;
7832 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7833 		info.nativeFormats |= RTAUDIO_SINT32;
7834 	format = SND_PCM_FORMAT_FLOAT;
7835 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7836 		info.nativeFormats |= RTAUDIO_FLOAT32;
7837 	format = SND_PCM_FORMAT_FLOAT64;
7838 	if (snd_pcm_hw_params_test_format(phandle, params, format) == 0)
7839 		info.nativeFormats |= RTAUDIO_FLOAT64;
7840 
7841 	// Check that we have at least one supported format
7842 	if (info.nativeFormats == 0)
7843 	{
7844 		snd_pcm_close(phandle);
7845 		errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7846 		errorText_ = errorStream_.str();
7847 		error(RtAudioError::WARNING);
7848 		return info;
7849 	}
7850 
7851 	// Get the device name
7852 	char *cardname;
7853 	result = snd_card_get_name(card, &cardname);
7854 	if (result >= 0)
7855 	{
7856 		sprintf(name, "hw:%s,%d", cardname, subdevice);
7857 		free(cardname);
7858 	}
7859 	info.name = name;
7860 
7861 	// That's all ... close the device and return
7862 	snd_pcm_close(phandle);
7863 	info.probed = true;
7864 	return info;
7865 }
7866 
saveDeviceInfo(void)7867 void RtApiAlsa ::saveDeviceInfo(void)
7868 {
7869 	devices_.clear();
7870 
7871 	unsigned int nDevices = getDeviceCount();
7872 	devices_.resize(nDevices);
7873 	for (unsigned int i = 0; i < nDevices; i++)
7874 		devices_[i] = getDeviceInfo(i);
7875 }
7876 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)7877 bool RtApiAlsa ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
7878 								 unsigned int firstChannel, unsigned int sampleRate,
7879 								 RtAudioFormat format, unsigned int *bufferSize,
7880 								 RtAudio::StreamOptions *options)
7881 
7882 {
7883 #if defined(__RTAUDIO_DEBUG__)
7884 	snd_output_t *out;
7885 	snd_output_stdio_attach(&out, stderr, 0);
7886 #endif
7887 
7888 	// I'm not using the "plug" interface ... too much inconsistent behavior.
7889 
7890 	unsigned nDevices = 0;
7891 	int result, subdevice, card;
7892 	char name[64];
7893 	snd_ctl_t *chandle;
7894 
7895 	if (options && options->flags & RTAUDIO_ALSA_USE_DEFAULT)
7896 		snprintf(name, sizeof(name), "%s", "default");
7897 	else
7898 	{
7899 		// Count cards and devices
7900 		card = -1;
7901 		snd_card_next(&card);
7902 		while (card >= 0)
7903 		{
7904 			sprintf(name, "hw:%d", card);
7905 			result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK);
7906 			if (result < 0)
7907 			{
7908 				errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror(result) << ".";
7909 				errorText_ = errorStream_.str();
7910 				return FAILURE;
7911 			}
7912 			subdevice = -1;
7913 			while (1)
7914 			{
7915 				result = snd_ctl_pcm_next_device(chandle, &subdevice);
7916 				if (result < 0) break;
7917 				if (subdevice < 0) break;
7918 				if (nDevices == device)
7919 				{
7920 					sprintf(name, "hw:%d,%d", card, subdevice);
7921 					snd_ctl_close(chandle);
7922 					goto foundDevice;
7923 				}
7924 				nDevices++;
7925 			}
7926 			snd_ctl_close(chandle);
7927 			snd_card_next(&card);
7928 		}
7929 
7930 		result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK);
7931 		if (result == 0)
7932 		{
7933 			if (nDevices == device)
7934 			{
7935 				strcpy(name, "default");
7936 				goto foundDevice;
7937 			}
7938 			nDevices++;
7939 		}
7940 
7941 		if (nDevices == 0)
7942 		{
7943 			// This should not happen because a check is made before this function is called.
7944 			errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7945 			return FAILURE;
7946 		}
7947 
7948 		if (device >= nDevices)
7949 		{
7950 			// This should not happen because a check is made before this function is called.
7951 			errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7952 			return FAILURE;
7953 		}
7954 	}
7955 
7956 foundDevice:
7957 
7958 	// The getDeviceInfo() function will not work for a device that is
7959 	// already open.  Thus, we'll probe the system before opening a
7960 	// stream and save the results for use by getDeviceInfo().
7961 	if (mode == OUTPUT || (mode == INPUT && stream_.mode != OUTPUT))  // only do once
7962 		this->saveDeviceInfo();
7963 
7964 	snd_pcm_stream_t stream;
7965 	if (mode == OUTPUT)
7966 		stream = SND_PCM_STREAM_PLAYBACK;
7967 	else
7968 		stream = SND_PCM_STREAM_CAPTURE;
7969 
7970 	snd_pcm_t *phandle;
7971 	int openMode = SND_PCM_ASYNC;
7972 	result = snd_pcm_open(&phandle, name, stream, openMode);
7973 	if (result < 0)
7974 	{
7975 		if (mode == OUTPUT)
7976 			errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7977 		else
7978 			errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7979 		errorText_ = errorStream_.str();
7980 		return FAILURE;
7981 	}
7982 
7983 	// Fill the parameter structure.
7984 	snd_pcm_hw_params_t *hw_params;
7985 	snd_pcm_hw_params_alloca(&hw_params);
7986 	result = snd_pcm_hw_params_any(phandle, hw_params);
7987 	if (result < 0)
7988 	{
7989 		snd_pcm_close(phandle);
7990 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror(result) << ".";
7991 		errorText_ = errorStream_.str();
7992 		return FAILURE;
7993 	}
7994 
7995 #if defined(__RTAUDIO_DEBUG__)
7996 	fprintf(stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n");
7997 	snd_pcm_hw_params_dump(hw_params, out);
7998 #endif
7999 
8000 	// Set access ... check user preference.
8001 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
8002 	{
8003 		stream_.userInterleaved = false;
8004 		result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED);
8005 		if (result < 0)
8006 		{
8007 			result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
8008 			stream_.deviceInterleaved[mode] = true;
8009 		}
8010 		else
8011 			stream_.deviceInterleaved[mode] = false;
8012 	}
8013 	else
8014 	{
8015 		stream_.userInterleaved = true;
8016 		result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
8017 		if (result < 0)
8018 		{
8019 			result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED);
8020 			stream_.deviceInterleaved[mode] = false;
8021 		}
8022 		else
8023 			stream_.deviceInterleaved[mode] = true;
8024 	}
8025 
8026 	if (result < 0)
8027 	{
8028 		snd_pcm_close(phandle);
8029 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror(result) << ".";
8030 		errorText_ = errorStream_.str();
8031 		return FAILURE;
8032 	}
8033 
8034 	// Determine how to set the device format.
8035 	stream_.userFormat = format;
8036 	snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
8037 
8038 	if (format == RTAUDIO_SINT8)
8039 		deviceFormat = SND_PCM_FORMAT_S8;
8040 	else if (format == RTAUDIO_SINT16)
8041 		deviceFormat = SND_PCM_FORMAT_S16;
8042 	else if (format == RTAUDIO_SINT24)
8043 		deviceFormat = SND_PCM_FORMAT_S24;
8044 	else if (format == RTAUDIO_SINT32)
8045 		deviceFormat = SND_PCM_FORMAT_S32;
8046 	else if (format == RTAUDIO_FLOAT32)
8047 		deviceFormat = SND_PCM_FORMAT_FLOAT;
8048 	else if (format == RTAUDIO_FLOAT64)
8049 		deviceFormat = SND_PCM_FORMAT_FLOAT64;
8050 
8051 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8052 	{
8053 		stream_.deviceFormat[mode] = format;
8054 		goto setFormat;
8055 	}
8056 
8057 	// The user requested format is not natively supported by the device.
8058 	deviceFormat = SND_PCM_FORMAT_FLOAT64;
8059 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8060 	{
8061 		stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
8062 		goto setFormat;
8063 	}
8064 
8065 	deviceFormat = SND_PCM_FORMAT_FLOAT;
8066 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8067 	{
8068 		stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8069 		goto setFormat;
8070 	}
8071 
8072 	deviceFormat = SND_PCM_FORMAT_S32;
8073 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8074 	{
8075 		stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8076 		goto setFormat;
8077 	}
8078 
8079 	deviceFormat = SND_PCM_FORMAT_S24;
8080 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8081 	{
8082 		stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8083 		goto setFormat;
8084 	}
8085 
8086 	deviceFormat = SND_PCM_FORMAT_S16;
8087 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8088 	{
8089 		stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8090 		goto setFormat;
8091 	}
8092 
8093 	deviceFormat = SND_PCM_FORMAT_S8;
8094 	if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0)
8095 	{
8096 		stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8097 		goto setFormat;
8098 	}
8099 
8100 	// If we get here, no supported format was found.
8101 	snd_pcm_close(phandle);
8102 	errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
8103 	errorText_ = errorStream_.str();
8104 	return FAILURE;
8105 
8106 setFormat:
8107 	result = snd_pcm_hw_params_set_format(phandle, hw_params, deviceFormat);
8108 	if (result < 0)
8109 	{
8110 		snd_pcm_close(phandle);
8111 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror(result) << ".";
8112 		errorText_ = errorStream_.str();
8113 		return FAILURE;
8114 	}
8115 
8116 	// Determine whether byte-swaping is necessary.
8117 	stream_.doByteSwap[mode] = false;
8118 	if (deviceFormat != SND_PCM_FORMAT_S8)
8119 	{
8120 		result = snd_pcm_format_cpu_endian(deviceFormat);
8121 		if (result == 0)
8122 			stream_.doByteSwap[mode] = true;
8123 		else if (result < 0)
8124 		{
8125 			snd_pcm_close(phandle);
8126 			errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << ".";
8127 			errorText_ = errorStream_.str();
8128 			return FAILURE;
8129 		}
8130 	}
8131 
8132 	// Set the sample rate.
8133 	result = snd_pcm_hw_params_set_rate_near(phandle, hw_params, (unsigned int *)&sampleRate, 0);
8134 	if (result < 0)
8135 	{
8136 		snd_pcm_close(phandle);
8137 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror(result) << ".";
8138 		errorText_ = errorStream_.str();
8139 		return FAILURE;
8140 	}
8141 
8142 	// Determine the number of channels for this device.  We support a possible
8143 	// minimum device channel number > than the value requested by the user.
8144 	stream_.nUserChannels[mode] = channels;
8145 	unsigned int value;
8146 	result = snd_pcm_hw_params_get_channels_max(hw_params, &value);
8147 	unsigned int deviceChannels = value;
8148 	if (result < 0 || deviceChannels < channels + firstChannel)
8149 	{
8150 		snd_pcm_close(phandle);
8151 		errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror(result) << ".";
8152 		errorText_ = errorStream_.str();
8153 		return FAILURE;
8154 	}
8155 
8156 	result = snd_pcm_hw_params_get_channels_min(hw_params, &value);
8157 	if (result < 0)
8158 	{
8159 		snd_pcm_close(phandle);
8160 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror(result) << ".";
8161 		errorText_ = errorStream_.str();
8162 		return FAILURE;
8163 	}
8164 	deviceChannels = value;
8165 	if (deviceChannels < channels + firstChannel) deviceChannels = channels + firstChannel;
8166 	stream_.nDeviceChannels[mode] = deviceChannels;
8167 
8168 	// Set the device channels.
8169 	result = snd_pcm_hw_params_set_channels(phandle, hw_params, deviceChannels);
8170 	if (result < 0)
8171 	{
8172 		snd_pcm_close(phandle);
8173 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror(result) << ".";
8174 		errorText_ = errorStream_.str();
8175 		return FAILURE;
8176 	}
8177 
8178 	// Set the buffer (or period) size.
8179 	int dir = 0;
8180 	snd_pcm_uframes_t periodSize = *bufferSize;
8181 	result = snd_pcm_hw_params_set_period_size_near(phandle, hw_params, &periodSize, &dir);
8182 	if (result < 0)
8183 	{
8184 		snd_pcm_close(phandle);
8185 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror(result) << ".";
8186 		errorText_ = errorStream_.str();
8187 		return FAILURE;
8188 	}
8189 	*bufferSize = periodSize;
8190 
8191 	// Set the buffer number, which in ALSA is referred to as the "period".
8192 	unsigned int periods = 0;
8193 	if (options && options->flags & RTAUDIO_MINIMIZE_LATENCY) periods = 2;
8194 	if (options && options->numberOfBuffers > 0) periods = options->numberOfBuffers;
8195 	if (periods < 2) periods = 4;  // a fairly safe default value
8196 	result = snd_pcm_hw_params_set_periods_near(phandle, hw_params, &periods, &dir);
8197 	if (result < 0)
8198 	{
8199 		snd_pcm_close(phandle);
8200 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror(result) << ".";
8201 		errorText_ = errorStream_.str();
8202 		return FAILURE;
8203 	}
8204 
8205 	// If attempting to setup a duplex stream, the bufferSize parameter
8206 	// MUST be the same in both directions!
8207 	if (stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize)
8208 	{
8209 		snd_pcm_close(phandle);
8210 		errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
8211 		errorText_ = errorStream_.str();
8212 		return FAILURE;
8213 	}
8214 
8215 	stream_.bufferSize = *bufferSize;
8216 
8217 	// Install the hardware configuration
8218 	result = snd_pcm_hw_params(phandle, hw_params);
8219 	if (result < 0)
8220 	{
8221 		snd_pcm_close(phandle);
8222 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror(result) << ".";
8223 		errorText_ = errorStream_.str();
8224 		return FAILURE;
8225 	}
8226 
8227 #if defined(__RTAUDIO_DEBUG__)
8228 	fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
8229 	snd_pcm_hw_params_dump(hw_params, out);
8230 #endif
8231 
8232 	// Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
8233 	snd_pcm_sw_params_t *sw_params = NULL;
8234 	snd_pcm_sw_params_alloca(&sw_params);
8235 	snd_pcm_sw_params_current(phandle, sw_params);
8236 	snd_pcm_sw_params_set_start_threshold(phandle, sw_params, *bufferSize);
8237 	snd_pcm_sw_params_set_stop_threshold(phandle, sw_params, ULONG_MAX);
8238 	snd_pcm_sw_params_set_silence_threshold(phandle, sw_params, 0);
8239 
8240 	// The following two settings were suggested by Theo Veenker
8241 	//snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
8242 	//snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
8243 
8244 	// here are two options for a fix
8245 	//snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
8246 	snd_pcm_uframes_t val;
8247 	snd_pcm_sw_params_get_boundary(sw_params, &val);
8248 	snd_pcm_sw_params_set_silence_size(phandle, sw_params, val);
8249 
8250 	result = snd_pcm_sw_params(phandle, sw_params);
8251 	if (result < 0)
8252 	{
8253 		snd_pcm_close(phandle);
8254 		errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror(result) << ".";
8255 		errorText_ = errorStream_.str();
8256 		return FAILURE;
8257 	}
8258 
8259 #if defined(__RTAUDIO_DEBUG__)
8260 	fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
8261 	snd_pcm_sw_params_dump(sw_params, out);
8262 #endif
8263 
8264 	// Set flags for buffer conversion
8265 	stream_.doConvertBuffer[mode] = false;
8266 	if (stream_.userFormat != stream_.deviceFormat[mode])
8267 		stream_.doConvertBuffer[mode] = true;
8268 	if (stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode])
8269 		stream_.doConvertBuffer[mode] = true;
8270 	if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8271 		stream_.nUserChannels[mode] > 1)
8272 		stream_.doConvertBuffer[mode] = true;
8273 
8274 	// Allocate the ApiHandle if necessary and then save.
8275 	AlsaHandle *apiInfo = 0;
8276 	if (stream_.apiHandle == 0)
8277 	{
8278 		try
8279 		{
8280 			apiInfo = (AlsaHandle *)new AlsaHandle;
8281 		}
8282 		catch (std::bad_alloc &)
8283 		{
8284 			errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
8285 			goto error;
8286 		}
8287 
8288 		if (pthread_cond_init(&apiInfo->runnable_cv, NULL))
8289 		{
8290 			errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
8291 			goto error;
8292 		}
8293 
8294 		stream_.apiHandle = (void *)apiInfo;
8295 		apiInfo->handles[0] = 0;
8296 		apiInfo->handles[1] = 0;
8297 	}
8298 	else
8299 	{
8300 		apiInfo = (AlsaHandle *)stream_.apiHandle;
8301 	}
8302 	apiInfo->handles[mode] = phandle;
8303 	phandle = 0;
8304 
8305 	// Allocate necessary internal buffers.
8306 	unsigned long bufferBytes;
8307 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
8308 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
8309 	if (stream_.userBuffer[mode] == NULL)
8310 	{
8311 		errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
8312 		goto error;
8313 	}
8314 
8315 	if (stream_.doConvertBuffer[mode])
8316 	{
8317 		bool makeBuffer = true;
8318 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
8319 		if (mode == INPUT)
8320 		{
8321 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
8322 			{
8323 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
8324 				if (bufferBytes <= bytesOut) makeBuffer = false;
8325 			}
8326 		}
8327 
8328 		if (makeBuffer)
8329 		{
8330 			bufferBytes *= *bufferSize;
8331 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
8332 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
8333 			if (stream_.deviceBuffer == NULL)
8334 			{
8335 				errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
8336 				goto error;
8337 			}
8338 		}
8339 	}
8340 
8341 	stream_.sampleRate = sampleRate;
8342 	stream_.nBuffers = periods;
8343 	stream_.device[mode] = device;
8344 	stream_.state = STREAM_STOPPED;
8345 
8346 	// Setup the buffer conversion information structure.
8347 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, firstChannel);
8348 
8349 	// Setup thread if necessary.
8350 	if (stream_.mode == OUTPUT && mode == INPUT)
8351 	{
8352 		// We had already set up an output stream.
8353 		stream_.mode = DUPLEX;
8354 		// Link the streams if possible.
8355 		apiInfo->synchronized = false;
8356 		if (snd_pcm_link(apiInfo->handles[0], apiInfo->handles[1]) == 0)
8357 			apiInfo->synchronized = true;
8358 		else
8359 		{
8360 			errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
8361 			error(RtAudioError::WARNING);
8362 		}
8363 	}
8364 	else
8365 	{
8366 		stream_.mode = mode;
8367 
8368 		// Setup callback thread.
8369 		stream_.callbackInfo.object = (void *)this;
8370 
8371 		// Set the thread attributes for joinable and realtime scheduling
8372 		// priority (optional).  The higher priority will only take affect
8373 		// if the program is run as root or suid. Note, under Linux
8374 		// processes with CAP_SYS_NICE privilege, a user can change
8375 		// scheduling policy and priority (thus need not be root). See
8376 		// POSIX "capabilities".
8377 		pthread_attr_t attr;
8378 		pthread_attr_init(&attr);
8379 		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
8380 
8381 #ifdef SCHED_RR  // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8382 		if (options && options->flags & RTAUDIO_SCHEDULE_REALTIME)
8383 		{
8384 			// We previously attempted to increase the audio callback priority
8385 			// to SCHED_RR here via the attributes.  However, while no errors
8386 			// were reported in doing so, it did not work.  So, now this is
8387 			// done in the alsaCallbackHandler function.
8388 			stream_.callbackInfo.doRealtime = true;
8389 			int priority = options->priority;
8390 			int min = sched_get_priority_min(SCHED_RR);
8391 			int max = sched_get_priority_max(SCHED_RR);
8392 			if (priority < min)
8393 				priority = min;
8394 			else if (priority > max)
8395 				priority = max;
8396 			stream_.callbackInfo.priority = priority;
8397 		}
8398 #endif
8399 
8400 		stream_.callbackInfo.isRunning = true;
8401 		result = pthread_create(&stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo);
8402 		pthread_attr_destroy(&attr);
8403 		if (result)
8404 		{
8405 			stream_.callbackInfo.isRunning = false;
8406 			errorText_ = "RtApiAlsa::error creating callback thread!";
8407 			goto error;
8408 		}
8409 	}
8410 
8411 	return SUCCESS;
8412 
8413 error:
8414 	if (apiInfo)
8415 	{
8416 		pthread_cond_destroy(&apiInfo->runnable_cv);
8417 		if (apiInfo->handles[0]) snd_pcm_close(apiInfo->handles[0]);
8418 		if (apiInfo->handles[1]) snd_pcm_close(apiInfo->handles[1]);
8419 		delete apiInfo;
8420 		stream_.apiHandle = 0;
8421 	}
8422 
8423 	if (phandle) snd_pcm_close(phandle);
8424 
8425 	for (int i = 0; i < 2; i++)
8426 	{
8427 		if (stream_.userBuffer[i])
8428 		{
8429 			free(stream_.userBuffer[i]);
8430 			stream_.userBuffer[i] = 0;
8431 		}
8432 	}
8433 
8434 	if (stream_.deviceBuffer)
8435 	{
8436 		free(stream_.deviceBuffer);
8437 		stream_.deviceBuffer = 0;
8438 	}
8439 
8440 	stream_.state = STREAM_CLOSED;
8441 	return FAILURE;
8442 }
8443 
void RtApiAlsa ::closeStream()
{
	// Terminate the callback thread, close the pcm handle(s), and release
	// all stream buffers and the per-API handle structure.
	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
		error(RtAudioError::WARNING);
		return;
	}

	AlsaHandle *apiInfo = (AlsaHandle *)stream_.apiHandle;
	stream_.callbackInfo.isRunning = false;  // tells the callback loop to exit
	MUTEX_LOCK(&stream_.mutex);
	if (stream_.state == STREAM_STOPPED)
	{
		// Wake a callback thread parked in pthread_cond_wait() so it can
		// observe isRunning == false and terminate.
		apiInfo->runnable = true;
		pthread_cond_signal(&apiInfo->runnable_cv);
	}
	MUTEX_UNLOCK(&stream_.mutex);
	pthread_join(stream_.callbackInfo.thread, NULL);

	if (stream_.state == STREAM_RUNNING)
	{
		// Abort any in-progress i/o before closing the device handles.
		stream_.state = STREAM_STOPPED;
		if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
			snd_pcm_drop(apiInfo->handles[0]);
		if (stream_.mode == INPUT || stream_.mode == DUPLEX)
			snd_pcm_drop(apiInfo->handles[1]);
	}

	// NOTE(review): apiInfo was already dereferenced above; this null check
	// only guards the cleanup below — confirm apiInfo can never be null here.
	if (apiInfo)
	{
		pthread_cond_destroy(&apiInfo->runnable_cv);
		if (apiInfo->handles[0]) snd_pcm_close(apiInfo->handles[0]);
		if (apiInfo->handles[1]) snd_pcm_close(apiInfo->handles[1]);
		delete apiInfo;
		stream_.apiHandle = 0;
	}

	// Free the user-side buffers for both directions (0 = output, 1 = input).
	for (int i = 0; i < 2; i++)
	{
		if (stream_.userBuffer[i])
		{
			free(stream_.userBuffer[i]);
			stream_.userBuffer[i] = 0;
		}
	}

	if (stream_.deviceBuffer)
	{
		free(stream_.deviceBuffer);
		stream_.deviceBuffer = 0;
	}

	stream_.mode = UNINITIALIZED;
	stream_.state = STREAM_CLOSED;
}
8500 
void RtApiAlsa ::startStream()
{
	// This method calls snd_pcm_prepare if the device isn't already in that state.

	verifyStream();
	if (stream_.state == STREAM_RUNNING)
	{
		errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
		error(RtAudioError::WARNING);
		return;
	}

	MUTEX_LOCK(&stream_.mutex);

	int result = 0;
	snd_pcm_state_t state;
	AlsaHandle *apiInfo = (AlsaHandle *)stream_.apiHandle;
	snd_pcm_t **handle = (snd_pcm_t **)apiInfo->handles;
	// Prepare the playback device (handle[0]) if it isn't already prepared.
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		state = snd_pcm_state(handle[0]);
		if (state != SND_PCM_STATE_PREPARED)
		{
			result = snd_pcm_prepare(handle[0]);
			if (result < 0)
			{
				errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror(result) << ".";
				errorText_ = errorStream_.str();
				goto unlock;
			}
		}
	}

	// Prepare the capture device (handle[1]) unless it is linked to the
	// playback device, in which case preparing the output handles both.
	if ((stream_.mode == INPUT || stream_.mode == DUPLEX) && !apiInfo->synchronized)
	{
		result = snd_pcm_drop(handle[1]);  // fix to remove stale data received since device has been open
		state = snd_pcm_state(handle[1]);
		if (state != SND_PCM_STATE_PREPARED)
		{
			result = snd_pcm_prepare(handle[1]);
			if (result < 0)
			{
				errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror(result) << ".";
				errorText_ = errorStream_.str();
				goto unlock;
			}
		}
	}

	stream_.state = STREAM_RUNNING;

unlock:
	// The callback thread is released even on a failed prepare; the error
	// is reported to the user below via error().
	apiInfo->runnable = true;
	pthread_cond_signal(&apiInfo->runnable_cv);
	MUTEX_UNLOCK(&stream_.mutex);

	if (result >= 0) return;
	error(RtAudioError::SYSTEM_ERROR);
}
8560 
stopStream()8561 void RtApiAlsa ::stopStream()
8562 {
8563 	verifyStream();
8564 	if (stream_.state == STREAM_STOPPED)
8565 	{
8566 		errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8567 		error(RtAudioError::WARNING);
8568 		return;
8569 	}
8570 
8571 	stream_.state = STREAM_STOPPED;
8572 	MUTEX_LOCK(&stream_.mutex);
8573 
8574 	int result = 0;
8575 	AlsaHandle *apiInfo = (AlsaHandle *)stream_.apiHandle;
8576 	snd_pcm_t **handle = (snd_pcm_t **)apiInfo->handles;
8577 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
8578 	{
8579 		if (apiInfo->synchronized)
8580 			result = snd_pcm_drop(handle[0]);
8581 		else
8582 			result = snd_pcm_drain(handle[0]);
8583 		if (result < 0)
8584 		{
8585 			errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror(result) << ".";
8586 			errorText_ = errorStream_.str();
8587 			goto unlock;
8588 		}
8589 	}
8590 
8591 	if ((stream_.mode == INPUT || stream_.mode == DUPLEX) && !apiInfo->synchronized)
8592 	{
8593 		result = snd_pcm_drop(handle[1]);
8594 		if (result < 0)
8595 		{
8596 			errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror(result) << ".";
8597 			errorText_ = errorStream_.str();
8598 			goto unlock;
8599 		}
8600 	}
8601 
8602 unlock:
8603 	apiInfo->runnable = false;  // fixes high CPU usage when stopped
8604 	MUTEX_UNLOCK(&stream_.mutex);
8605 
8606 	if (result >= 0) return;
8607 	error(RtAudioError::SYSTEM_ERROR);
8608 }
8609 
abortStream()8610 void RtApiAlsa ::abortStream()
8611 {
8612 	verifyStream();
8613 	if (stream_.state == STREAM_STOPPED)
8614 	{
8615 		errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8616 		error(RtAudioError::WARNING);
8617 		return;
8618 	}
8619 
8620 	stream_.state = STREAM_STOPPED;
8621 	MUTEX_LOCK(&stream_.mutex);
8622 
8623 	int result = 0;
8624 	AlsaHandle *apiInfo = (AlsaHandle *)stream_.apiHandle;
8625 	snd_pcm_t **handle = (snd_pcm_t **)apiInfo->handles;
8626 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
8627 	{
8628 		result = snd_pcm_drop(handle[0]);
8629 		if (result < 0)
8630 		{
8631 			errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror(result) << ".";
8632 			errorText_ = errorStream_.str();
8633 			goto unlock;
8634 		}
8635 	}
8636 
8637 	if ((stream_.mode == INPUT || stream_.mode == DUPLEX) && !apiInfo->synchronized)
8638 	{
8639 		result = snd_pcm_drop(handle[1]);
8640 		if (result < 0)
8641 		{
8642 			errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror(result) << ".";
8643 			errorText_ = errorStream_.str();
8644 			goto unlock;
8645 		}
8646 	}
8647 
8648 unlock:
8649 	apiInfo->runnable = false;  // fixes high CPU usage when stopped
8650 	MUTEX_UNLOCK(&stream_.mutex);
8651 
8652 	if (result >= 0) return;
8653 	error(RtAudioError::SYSTEM_ERROR);
8654 }
8655 
void RtApiAlsa ::callbackEvent()
{
	// One iteration of the callback loop: park while stopped, invoke the
	// user callback, then read captured samples and/or write playback
	// samples, recovering from xruns (over/underruns) as needed.
	AlsaHandle *apiInfo = (AlsaHandle *)stream_.apiHandle;
	if (stream_.state == STREAM_STOPPED)
	{
		// Wait until startStream() sets runnable (or closeStream() wakes us
		// so the thread can exit).
		MUTEX_LOCK(&stream_.mutex);
		while (!apiInfo->runnable)
			pthread_cond_wait(&apiInfo->runnable_cv, &stream_.mutex);

		if (stream_.state != STREAM_RUNNING)
		{
			MUTEX_UNLOCK(&stream_.mutex);
			return;
		}
		MUTEX_UNLOCK(&stream_.mutex);
	}

	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(RtAudioError::WARNING);
		return;
	}

	// Report xruns recorded since the last callback through the status flags,
	// then clear them.
	int doStopStream = 0;
	RtAudioCallback callback = (RtAudioCallback)stream_.callbackInfo.callback;
	double streamTime = getStreamTime();
	RtAudioStreamStatus status = 0;
	if (stream_.mode != INPUT && apiInfo->xrun[0] == true)
	{
		status |= RTAUDIO_OUTPUT_UNDERFLOW;
		apiInfo->xrun[0] = false;
	}
	if (stream_.mode != OUTPUT && apiInfo->xrun[1] == true)
	{
		status |= RTAUDIO_INPUT_OVERFLOW;
		apiInfo->xrun[1] = false;
	}
	doStopStream = callback(stream_.userBuffer[0], stream_.userBuffer[1],
							stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData);

	// A callback return value of 2 requests an immediate abort (drop data).
	if (doStopStream == 2)
	{
		abortStream();
		return;
	}

	MUTEX_LOCK(&stream_.mutex);

	// The state might change while waiting on a mutex.
	if (stream_.state == STREAM_STOPPED) goto unlock;

	int result;
	char *buffer;
	int channels;
	snd_pcm_t **handle;
	snd_pcm_sframes_t frames;
	RtAudioFormat format;
	handle = (snd_pcm_t **)apiInfo->handles;

	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
	{
		// Setup parameters: read into the device buffer when format/channel
		// conversion is needed, otherwise straight into the user buffer.
		if (stream_.doConvertBuffer[1])
		{
			buffer = stream_.deviceBuffer;
			channels = stream_.nDeviceChannels[1];
			format = stream_.deviceFormat[1];
		}
		else
		{
			buffer = stream_.userBuffer[1];
			channels = stream_.nUserChannels[1];
			format = stream_.userFormat;
		}

		// Read samples from device in interleaved/non-interleaved format.
		if (stream_.deviceInterleaved[1])
			result = snd_pcm_readi(handle[1], buffer, stream_.bufferSize);
		else
		{
			// Non-interleaved: build per-channel pointers into the single
			// contiguous buffer (bufs is a VLA, a compiler extension in C++).
			void *bufs[channels];
			size_t offset = stream_.bufferSize * formatBytes(format);
			for (int i = 0; i < channels; i++)
				bufs[i] = (void *)(buffer + (i * offset));
			result = snd_pcm_readn(handle[1], bufs, stream_.bufferSize);
		}

		if (result < (int)stream_.bufferSize)
		{
			// Either an error or overrun occured.
			if (result == -EPIPE)
			{
				snd_pcm_state_t state = snd_pcm_state(handle[1]);
				if (state == SND_PCM_STATE_XRUN)
				{
					// Overrun: flag it for the next callback and re-prepare
					// the device so capture can continue.
					apiInfo->xrun[1] = true;
					result = snd_pcm_prepare(handle[1]);
					if (result < 0)
					{
						errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror(result) << ".";
						errorText_ = errorStream_.str();
					}
				}
				else
				{
					errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".";
					errorText_ = errorStream_.str();
				}
			}
			else
			{
				errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror(result) << ".";
				errorText_ = errorStream_.str();
			}
			error(RtAudioError::WARNING);
			// Input failed, but still attempt the output half of a duplex stream.
			goto tryOutput;
		}

		// Do byte swapping if necessary.
		if (stream_.doByteSwap[1])
			byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

		// Do buffer conversion if necessary.
		if (stream_.doConvertBuffer[1])
			convertBuffer(stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1]);

		// Check stream latency
		result = snd_pcm_delay(handle[1], &frames);
		if (result == 0 && frames > 0) stream_.latency[1] = frames;
	}

tryOutput:

	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// Setup parameters and do buffer conversion if necessary.
		if (stream_.doConvertBuffer[0])
		{
			buffer = stream_.deviceBuffer;
			convertBuffer(buffer, stream_.userBuffer[0], stream_.convertInfo[0]);
			channels = stream_.nDeviceChannels[0];
			format = stream_.deviceFormat[0];
		}
		else
		{
			buffer = stream_.userBuffer[0];
			channels = stream_.nUserChannels[0];
			format = stream_.userFormat;
		}

		// Do byte swapping if necessary.
		if (stream_.doByteSwap[0])
			byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

		// Write samples to device in interleaved/non-interleaved format.
		if (stream_.deviceInterleaved[0])
			result = snd_pcm_writei(handle[0], buffer, stream_.bufferSize);
		else
		{
			// Non-interleaved write: per-channel pointers, as for input above.
			void *bufs[channels];
			size_t offset = stream_.bufferSize * formatBytes(format);
			for (int i = 0; i < channels; i++)
				bufs[i] = (void *)(buffer + (i * offset));
			result = snd_pcm_writen(handle[0], bufs, stream_.bufferSize);
		}

		if (result < (int)stream_.bufferSize)
		{
			// Either an error or underrun occured.
			if (result == -EPIPE)
			{
				snd_pcm_state_t state = snd_pcm_state(handle[0]);
				if (state == SND_PCM_STATE_XRUN)
				{
					// Underrun: flag it, then re-prepare so playback resumes.
					apiInfo->xrun[0] = true;
					result = snd_pcm_prepare(handle[0]);
					if (result < 0)
					{
						errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror(result) << ".";
						errorText_ = errorStream_.str();
					}
					else
						errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
				}
				else
				{
					errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".";
					errorText_ = errorStream_.str();
				}
			}
			else
			{
				errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror(result) << ".";
				errorText_ = errorStream_.str();
			}
			error(RtAudioError::WARNING);
			goto unlock;
		}

		// Check stream latency
		result = snd_pcm_delay(handle[0], &frames);
		if (result == 0 && frames > 0) stream_.latency[0] = frames;
	}

unlock:
	MUTEX_UNLOCK(&stream_.mutex);

	// Advance the stream time and honor a callback return value of 1 (stop).
	RtApi::tickStreamTime();
	if (doStopStream == 1) this->stopStream();
}
8867 
alsaCallbackHandler(void * ptr)8868 static void *alsaCallbackHandler(void *ptr)
8869 {
8870 	CallbackInfo *info = (CallbackInfo *)ptr;
8871 	RtApiAlsa *object = (RtApiAlsa *)info->object;
8872 	bool *isRunning = &info->isRunning;
8873 
8874 #ifdef SCHED_RR  // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8875 	if (info->doRealtime)
8876 	{
8877 		pthread_t tID = pthread_self();       // ID of this thread
8878 		sched_param prio = {info->priority};  // scheduling priority of thread
8879 		pthread_setschedparam(tID, SCHED_RR, &prio);
8880 	}
8881 #endif
8882 
8883 	while (*isRunning == true)
8884 	{
8885 		pthread_testcancel();
8886 		object->callbackEvent();
8887 	}
8888 
8889 	pthread_exit(NULL);
8890 }
8891 
8892 //******************** End of __LINUX_ALSA__ *********************//
8893 #endif
8894 
8895 #if defined(__LINUX_PULSE__)
8896 
8897 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8898 // and Tristan Matthews.
8899 
8900 #include <pulse/error.h>
8901 #include <pulse/simple.h>
8902 #include <cstdio>
8903 
// Sample rates accepted by the PulseAudio backend.  The list is
// zero-terminated so callers can walk it without a length constant.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
	8000, 16000, 22050, 32000, 44100, 48000, 96000, 0};
8906 
// Pairs an RtAudio sample format with its PulseAudio equivalent; used
// by the supported_sampleformats translation table below.
struct rtaudio_pa_format_mapping_t
{
	RtAudioFormat rtaudio_format;
	pa_sample_format_t pa_format;
};
8912 
// Format translation table between RtAudio and PulseAudio sample
// formats, terminated by a {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{RTAUDIO_SINT16, PA_SAMPLE_S16LE},
	{RTAUDIO_SINT32, PA_SAMPLE_S32LE},
	{RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
	{0, PA_SAMPLE_INVALID}};
8918 
// Per-stream state for the PulseAudio backend: the playback/record
// connections plus the thread and condition variable used to park the
// callback loop while the stream is stopped.
struct PulseAudioHandle
{
	pa_simple *s_play;           // playback connection (NULL when not open)
	pa_simple *s_rec;            // record connection (NULL when not open)
	pthread_t thread;            // callback thread; set in probeDeviceOpen()
	pthread_cond_t runnable_cv;  // initialized via pthread_cond_init() in probeDeviceOpen()
	bool runnable;               // true when the callback loop may proceed
	PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) {}
};
8928 
~RtApiPulse()8929 RtApiPulse::~RtApiPulse()
8930 {
8931 	if (stream_.state != STREAM_CLOSED)
8932 		closeStream();
8933 }
8934 
getDeviceCount(void)8935 unsigned int RtApiPulse::getDeviceCount(void)
8936 {
8937 	return 1;
8938 }
8939 
getDeviceInfo(unsigned int)8940 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo(unsigned int /*device*/)
8941 {
8942 	RtAudio::DeviceInfo info;
8943 	info.probed = true;
8944 	info.name = "PulseAudio";
8945 	info.outputChannels = 2;
8946 	info.inputChannels = 2;
8947 	info.duplexChannels = 2;
8948 	info.isDefaultOutput = true;
8949 	info.isDefaultInput = true;
8950 
8951 	for (const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr)
8952 		info.sampleRates.push_back(*sr);
8953 
8954 	info.preferredSampleRate = 48000;
8955 	info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8956 
8957 	return info;
8958 }
8959 
pulseaudio_callback(void * user)8960 static void *pulseaudio_callback(void *user)
8961 {
8962 	CallbackInfo *cbi = static_cast<CallbackInfo *>(user);
8963 	RtApiPulse *context = static_cast<RtApiPulse *>(cbi->object);
8964 	volatile bool *isRunning = &cbi->isRunning;
8965 
8966 	while (*isRunning)
8967 	{
8968 		pthread_testcancel();
8969 		context->callbackEvent();
8970 	}
8971 
8972 	pthread_exit(NULL);
8973 }
8974 
closeStream(void)8975 void RtApiPulse::closeStream(void)
8976 {
8977 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);
8978 
8979 	stream_.callbackInfo.isRunning = false;
8980 	if (pah)
8981 	{
8982 		MUTEX_LOCK(&stream_.mutex);
8983 		if (stream_.state == STREAM_STOPPED)
8984 		{
8985 			pah->runnable = true;
8986 			pthread_cond_signal(&pah->runnable_cv);
8987 		}
8988 		MUTEX_UNLOCK(&stream_.mutex);
8989 
8990 		pthread_join(pah->thread, 0);
8991 		if (pah->s_play)
8992 		{
8993 			pa_simple_flush(pah->s_play, NULL);
8994 			pa_simple_free(pah->s_play);
8995 		}
8996 		if (pah->s_rec)
8997 			pa_simple_free(pah->s_rec);
8998 
8999 		pthread_cond_destroy(&pah->runnable_cv);
9000 		delete pah;
9001 		stream_.apiHandle = 0;
9002 	}
9003 
9004 	if (stream_.userBuffer[0])
9005 	{
9006 		free(stream_.userBuffer[0]);
9007 		stream_.userBuffer[0] = 0;
9008 	}
9009 	if (stream_.userBuffer[1])
9010 	{
9011 		free(stream_.userBuffer[1]);
9012 		stream_.userBuffer[1] = 0;
9013 	}
9014 
9015 	stream_.state = STREAM_CLOSED;
9016 	stream_.mode = UNINITIALIZED;
9017 }
9018 
// Run one cycle of the stream on its dedicated thread: wait while
// stopped, invoke the user callback, then perform the blocking
// PulseAudio write and/or read for one buffer of audio.  Called in a
// loop by pulseaudio_callback().
void RtApiPulse::callbackEvent(void)
{
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);

	if (stream_.state == STREAM_STOPPED)
	{
		// Park here until startStream() (or closeStream()) signals runnable.
		MUTEX_LOCK(&stream_.mutex);
		while (!pah->runnable)
			pthread_cond_wait(&pah->runnable_cv, &stream_.mutex);

		if (stream_.state != STREAM_RUNNING)
		{
			// Woken for shutdown rather than start: bail out.
			MUTEX_UNLOCK(&stream_.mutex);
			return;
		}
		MUTEX_UNLOCK(&stream_.mutex);
	}

	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ =
			"RtApiPulse::callbackEvent(): the stream is closed ... "
			"this shouldn't happen!";
		error(RtAudioError::WARNING);
		return;
	}

	// Invoke the user callback to produce/consume the next buffer.
	RtAudioCallback callback = (RtAudioCallback)stream_.callbackInfo.callback;
	double streamTime = getStreamTime();
	RtAudioStreamStatus status = 0;
	int doStopStream = callback(stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
								stream_.bufferSize, streamTime, status,
								stream_.callbackInfo.userData);

	if (doStopStream == 2)
	{
		// A return value of 2 requests an immediate abort (drop pending audio).
		abortStream();
		return;
	}

	MUTEX_LOCK(&stream_.mutex);
	// Hand PulseAudio the device buffer when format/channel conversion is
	// needed; otherwise the user buffers are passed through directly.
	void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
	void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

	// Stream may have been stopped while the callback ran.
	if (stream_.state != STREAM_RUNNING)
		goto unlock;

	int pa_error;
	size_t bytes;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		if (stream_.doConvertBuffer[OUTPUT])
		{
			// Convert user data into the device format before writing.
			convertBuffer(stream_.deviceBuffer,
						  stream_.userBuffer[OUTPUT],
						  stream_.convertInfo[OUTPUT]);
			bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
					formatBytes(stream_.deviceFormat[OUTPUT]);
		}
		else
			bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
					formatBytes(stream_.userFormat);

		// Blocking write of one buffer to the playback connection.
		if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0)
		{
			errorStream_ << "RtApiPulse::callbackEvent: audio write error, " << pa_strerror(pa_error) << ".";
			errorText_ = errorStream_.str();
			error(RtAudioError::WARNING);
		}
	}

	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
	{
		if (stream_.doConvertBuffer[INPUT])
			bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
					formatBytes(stream_.deviceFormat[INPUT]);
		else
			bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
					formatBytes(stream_.userFormat);

		// Blocking read of one buffer from the record connection.
		if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0)
		{
			errorStream_ << "RtApiPulse::callbackEvent: audio read error, " << pa_strerror(pa_error) << ".";
			errorText_ = errorStream_.str();
			error(RtAudioError::WARNING);
		}
		if (stream_.doConvertBuffer[INPUT])
		{
			// Convert captured device data into the user's format.
			convertBuffer(stream_.userBuffer[INPUT],
						  stream_.deviceBuffer,
						  stream_.convertInfo[INPUT]);
		}
	}

unlock:
	MUTEX_UNLOCK(&stream_.mutex);
	RtApi::tickStreamTime();

	// A return value of 1 from the user callback requests a drained stop.
	if (doStopStream == 1)
		stopStream();
}
9120 
startStream(void)9121 void RtApiPulse::startStream(void)
9122 {
9123 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);
9124 
9125 	if (stream_.state == STREAM_CLOSED)
9126 	{
9127 		errorText_ = "RtApiPulse::startStream(): the stream is not open!";
9128 		error(RtAudioError::INVALID_USE);
9129 		return;
9130 	}
9131 	if (stream_.state == STREAM_RUNNING)
9132 	{
9133 		errorText_ = "RtApiPulse::startStream(): the stream is already running!";
9134 		error(RtAudioError::WARNING);
9135 		return;
9136 	}
9137 
9138 	MUTEX_LOCK(&stream_.mutex);
9139 
9140 	stream_.state = STREAM_RUNNING;
9141 
9142 	pah->runnable = true;
9143 	pthread_cond_signal(&pah->runnable_cv);
9144 	MUTEX_UNLOCK(&stream_.mutex);
9145 }
9146 
stopStream(void)9147 void RtApiPulse::stopStream(void)
9148 {
9149 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);
9150 
9151 	if (stream_.state == STREAM_CLOSED)
9152 	{
9153 		errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
9154 		error(RtAudioError::INVALID_USE);
9155 		return;
9156 	}
9157 	if (stream_.state == STREAM_STOPPED)
9158 	{
9159 		errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
9160 		error(RtAudioError::WARNING);
9161 		return;
9162 	}
9163 
9164 	stream_.state = STREAM_STOPPED;
9165 	MUTEX_LOCK(&stream_.mutex);
9166 
9167 	if (pah && pah->s_play)
9168 	{
9169 		int pa_error;
9170 		if (pa_simple_drain(pah->s_play, &pa_error) < 0)
9171 		{
9172 			errorStream_ << "RtApiPulse::stopStream: error draining output device, " << pa_strerror(pa_error) << ".";
9173 			errorText_ = errorStream_.str();
9174 			MUTEX_UNLOCK(&stream_.mutex);
9175 			error(RtAudioError::SYSTEM_ERROR);
9176 			return;
9177 		}
9178 	}
9179 
9180 	stream_.state = STREAM_STOPPED;
9181 	MUTEX_UNLOCK(&stream_.mutex);
9182 }
9183 
abortStream(void)9184 void RtApiPulse::abortStream(void)
9185 {
9186 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);
9187 
9188 	if (stream_.state == STREAM_CLOSED)
9189 	{
9190 		errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
9191 		error(RtAudioError::INVALID_USE);
9192 		return;
9193 	}
9194 	if (stream_.state == STREAM_STOPPED)
9195 	{
9196 		errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
9197 		error(RtAudioError::WARNING);
9198 		return;
9199 	}
9200 
9201 	stream_.state = STREAM_STOPPED;
9202 	MUTEX_LOCK(&stream_.mutex);
9203 
9204 	if (pah && pah->s_play)
9205 	{
9206 		int pa_error;
9207 		if (pa_simple_flush(pah->s_play, &pa_error) < 0)
9208 		{
9209 			errorStream_ << "RtApiPulse::abortStream: error flushing output device, " << pa_strerror(pa_error) << ".";
9210 			errorText_ = errorStream_.str();
9211 			MUTEX_UNLOCK(&stream_.mutex);
9212 			error(RtAudioError::SYSTEM_ERROR);
9213 			return;
9214 		}
9215 	}
9216 
9217 	stream_.state = STREAM_STOPPED;
9218 	MUTEX_UNLOCK(&stream_.mutex);
9219 }
9220 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)9221 bool RtApiPulse::probeDeviceOpen(unsigned int device, StreamMode mode,
9222 								 unsigned int channels, unsigned int firstChannel,
9223 								 unsigned int sampleRate, RtAudioFormat format,
9224 								 unsigned int *bufferSize, RtAudio::StreamOptions *options)
9225 {
9226 	PulseAudioHandle *pah = 0;
9227 	unsigned long bufferBytes = 0;
9228 	pa_sample_spec ss;
9229 
9230 	if (device != 0) return false;
9231 	if (mode != INPUT && mode != OUTPUT) return false;
9232 	if (channels != 1 && channels != 2)
9233 	{
9234 		errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
9235 		return false;
9236 	}
9237 	ss.channels = channels;
9238 
9239 	if (firstChannel != 0) return false;
9240 
9241 	bool sr_found = false;
9242 	for (const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr)
9243 	{
9244 		if (sampleRate == *sr)
9245 		{
9246 			sr_found = true;
9247 			stream_.sampleRate = sampleRate;
9248 			ss.rate = sampleRate;
9249 			break;
9250 		}
9251 	}
9252 	if (!sr_found)
9253 	{
9254 		errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
9255 		return false;
9256 	}
9257 
9258 	bool sf_found = 0;
9259 	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
9260 		 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf)
9261 	{
9262 		if (format == sf->rtaudio_format)
9263 		{
9264 			sf_found = true;
9265 			stream_.userFormat = sf->rtaudio_format;
9266 			stream_.deviceFormat[mode] = stream_.userFormat;
9267 			ss.format = sf->pa_format;
9268 			break;
9269 		}
9270 	}
9271 	if (!sf_found)
9272 	{  // Use internal data format conversion.
9273 		stream_.userFormat = format;
9274 		stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
9275 		ss.format = PA_SAMPLE_FLOAT32LE;
9276 	}
9277 
9278 	// Set other stream parameters.
9279 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
9280 		stream_.userInterleaved = false;
9281 	else
9282 		stream_.userInterleaved = true;
9283 	stream_.deviceInterleaved[mode] = true;
9284 	stream_.nBuffers = 1;
9285 	stream_.doByteSwap[mode] = false;
9286 	stream_.nUserChannels[mode] = channels;
9287 	stream_.nDeviceChannels[mode] = channels + firstChannel;
9288 	stream_.channelOffset[mode] = 0;
9289 	std::string streamName = "RtAudio";
9290 
9291 	// Set flags for buffer conversion.
9292 	stream_.doConvertBuffer[mode] = false;
9293 	if (stream_.userFormat != stream_.deviceFormat[mode])
9294 		stream_.doConvertBuffer[mode] = true;
9295 	if (stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode])
9296 		stream_.doConvertBuffer[mode] = true;
9297 
9298 	// Allocate necessary internal buffers.
9299 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
9300 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
9301 	if (stream_.userBuffer[mode] == NULL)
9302 	{
9303 		errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
9304 		goto error;
9305 	}
9306 	stream_.bufferSize = *bufferSize;
9307 
9308 	if (stream_.doConvertBuffer[mode])
9309 	{
9310 		bool makeBuffer = true;
9311 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
9312 		if (mode == INPUT)
9313 		{
9314 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
9315 			{
9316 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
9317 				if (bufferBytes <= bytesOut) makeBuffer = false;
9318 			}
9319 		}
9320 
9321 		if (makeBuffer)
9322 		{
9323 			bufferBytes *= *bufferSize;
9324 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
9325 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
9326 			if (stream_.deviceBuffer == NULL)
9327 			{
9328 				errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
9329 				goto error;
9330 			}
9331 		}
9332 	}
9333 
9334 	stream_.device[mode] = device;
9335 
9336 	// Setup the buffer conversion information structure.
9337 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, firstChannel);
9338 
9339 	if (!stream_.apiHandle)
9340 	{
9341 		PulseAudioHandle *pah = new PulseAudioHandle;
9342 		if (!pah)
9343 		{
9344 			errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
9345 			goto error;
9346 		}
9347 
9348 		stream_.apiHandle = pah;
9349 		if (pthread_cond_init(&pah->runnable_cv, NULL) != 0)
9350 		{
9351 			errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
9352 			goto error;
9353 		}
9354 	}
9355 	pah = static_cast<PulseAudioHandle *>(stream_.apiHandle);
9356 
9357 	int error;
9358 	if (options && !options->streamName.empty()) streamName = options->streamName;
9359 	switch (mode)
9360 	{
9361 		case INPUT:
9362 			pa_buffer_attr buffer_attr;
9363 			buffer_attr.fragsize = bufferBytes;
9364 			buffer_attr.maxlength = -1;
9365 
9366 			pah->s_rec = pa_simple_new(NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error);
9367 			if (!pah->s_rec)
9368 			{
9369 				errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
9370 				goto error;
9371 			}
9372 			break;
9373 		case OUTPUT:
9374 			pah->s_play = pa_simple_new(NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error);
9375 			if (!pah->s_play)
9376 			{
9377 				errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
9378 				goto error;
9379 			}
9380 			break;
9381 		default:
9382 			goto error;
9383 	}
9384 
9385 	if (stream_.mode == UNINITIALIZED)
9386 		stream_.mode = mode;
9387 	else if (stream_.mode == mode)
9388 		goto error;
9389 	else
9390 		stream_.mode = DUPLEX;
9391 
9392 	if (!stream_.callbackInfo.isRunning)
9393 	{
9394 		stream_.callbackInfo.object = this;
9395 		stream_.callbackInfo.isRunning = true;
9396 		if (pthread_create(&pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0)
9397 		{
9398 			errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
9399 			goto error;
9400 		}
9401 	}
9402 
9403 	stream_.state = STREAM_STOPPED;
9404 	return true;
9405 
9406 error:
9407 	if (pah && stream_.callbackInfo.isRunning)
9408 	{
9409 		pthread_cond_destroy(&pah->runnable_cv);
9410 		delete pah;
9411 		stream_.apiHandle = 0;
9412 	}
9413 
9414 	for (int i = 0; i < 2; i++)
9415 	{
9416 		if (stream_.userBuffer[i])
9417 		{
9418 			free(stream_.userBuffer[i]);
9419 			stream_.userBuffer[i] = 0;
9420 		}
9421 	}
9422 
9423 	if (stream_.deviceBuffer)
9424 	{
9425 		free(stream_.deviceBuffer);
9426 		stream_.deviceBuffer = 0;
9427 	}
9428 
9429 	return FAILURE;
9430 }
9431 
9432 //******************** End of __LINUX_PULSE__ *********************//
9433 #endif
9434 
9435 #if defined(__LINUX_OSS__)
9436 
9437 #include <unistd.h>
9438 #include <sys/ioctl.h>
9439 #include <unistd.h>
9440 #include <fcntl.h>
9441 #include <sys/soundcard.h>
9442 #include <errno.h>
9443 #include <math.h>
9444 
9445 static void *ossCallbackHandler(void *ptr);
9446 
9447 // A structure to hold various information related to the OSS API
9448 // implementation.
// Per-stream bookkeeping for the OSS API implementation.
struct OssHandle
{
	int id[2];  // device ids
	bool xrun[2];
	bool triggered;
	pthread_cond_t runnable;

	OssHandle()
		: triggered(false)
	{
		id[0] = id[1] = 0;
		xrun[0] = xrun[1] = false;
	}
};
9465 
RtApiOss()9466 RtApiOss ::RtApiOss()
9467 {
9468 	// Nothing to do here.
9469 }
9470 
~RtApiOss()9471 RtApiOss ::~RtApiOss()
9472 {
9473 	if (stream_.state != STREAM_CLOSED) closeStream();
9474 }
9475 
getDeviceCount(void)9476 unsigned int RtApiOss ::getDeviceCount(void)
9477 {
9478 	int mixerfd = open("/dev/mixer", O_RDWR, 0);
9479 	if (mixerfd == -1)
9480 	{
9481 		errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9482 		error(RtAudioError::WARNING);
9483 		return 0;
9484 	}
9485 
9486 	oss_sysinfo sysinfo;
9487 	if (ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo) == -1)
9488 	{
9489 		close(mixerfd);
9490 		errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9491 		error(RtAudioError::WARNING);
9492 		return 0;
9493 	}
9494 
9495 	close(mixerfd);
9496 	return sysinfo.numaudios;
9497 }
9498 
getDeviceInfo(unsigned int device)9499 RtAudio::DeviceInfo RtApiOss ::getDeviceInfo(unsigned int device)
9500 {
9501 	RtAudio::DeviceInfo info;
9502 	info.probed = false;
9503 
9504 	int mixerfd = open("/dev/mixer", O_RDWR, 0);
9505 	if (mixerfd == -1)
9506 	{
9507 		errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9508 		error(RtAudioError::WARNING);
9509 		return info;
9510 	}
9511 
9512 	oss_sysinfo sysinfo;
9513 	int result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
9514 	if (result == -1)
9515 	{
9516 		close(mixerfd);
9517 		errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9518 		error(RtAudioError::WARNING);
9519 		return info;
9520 	}
9521 
9522 	unsigned nDevices = sysinfo.numaudios;
9523 	if (nDevices == 0)
9524 	{
9525 		close(mixerfd);
9526 		errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9527 		error(RtAudioError::INVALID_USE);
9528 		return info;
9529 	}
9530 
9531 	if (device >= nDevices)
9532 	{
9533 		close(mixerfd);
9534 		errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9535 		error(RtAudioError::INVALID_USE);
9536 		return info;
9537 	}
9538 
9539 	oss_audioinfo ainfo;
9540 	ainfo.dev = device;
9541 	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
9542 	close(mixerfd);
9543 	if (result == -1)
9544 	{
9545 		errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9546 		errorText_ = errorStream_.str();
9547 		error(RtAudioError::WARNING);
9548 		return info;
9549 	}
9550 
9551 	// Probe channels
9552 	if (ainfo.caps & PCM_CAP_OUTPUT) info.outputChannels = ainfo.max_channels;
9553 	if (ainfo.caps & PCM_CAP_INPUT) info.inputChannels = ainfo.max_channels;
9554 	if (ainfo.caps & PCM_CAP_DUPLEX)
9555 	{
9556 		if (info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX)
9557 			info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9558 	}
9559 
9560 	// Probe data formats ... do for input
9561 	unsigned long mask = ainfo.iformats;
9562 	if (mask & AFMT_S16_LE || mask & AFMT_S16_BE)
9563 		info.nativeFormats |= RTAUDIO_SINT16;
9564 	if (mask & AFMT_S8)
9565 		info.nativeFormats |= RTAUDIO_SINT8;
9566 	if (mask & AFMT_S32_LE || mask & AFMT_S32_BE)
9567 		info.nativeFormats |= RTAUDIO_SINT32;
9568 	if (mask & AFMT_FLOAT)
9569 		info.nativeFormats |= RTAUDIO_FLOAT32;
9570 	if (mask & AFMT_S24_LE || mask & AFMT_S24_BE)
9571 		info.nativeFormats |= RTAUDIO_SINT24;
9572 
9573 	// Check that we have at least one supported format
9574 	if (info.nativeFormats == 0)
9575 	{
9576 		errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9577 		errorText_ = errorStream_.str();
9578 		error(RtAudioError::WARNING);
9579 		return info;
9580 	}
9581 
9582 	// Probe the supported sample rates.
9583 	info.sampleRates.clear();
9584 	if (ainfo.nrates)
9585 	{
9586 		for (unsigned int i = 0; i < ainfo.nrates; i++)
9587 		{
9588 			for (unsigned int k = 0; k < MAX_SAMPLE_RATES; k++)
9589 			{
9590 				if (ainfo.rates[i] == SAMPLE_RATES[k])
9591 				{
9592 					info.sampleRates.push_back(SAMPLE_RATES[k]);
9593 
9594 					if (!info.preferredSampleRate || (SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate))
9595 						info.preferredSampleRate = SAMPLE_RATES[k];
9596 
9597 					break;
9598 				}
9599 			}
9600 		}
9601 	}
9602 	else
9603 	{
9604 		// Check min and max rate values;
9605 		for (unsigned int k = 0; k < MAX_SAMPLE_RATES; k++)
9606 		{
9607 			if (ainfo.min_rate <= (int)SAMPLE_RATES[k] && ainfo.max_rate >= (int)SAMPLE_RATES[k])
9608 			{
9609 				info.sampleRates.push_back(SAMPLE_RATES[k]);
9610 
9611 				if (!info.preferredSampleRate || (SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate))
9612 					info.preferredSampleRate = SAMPLE_RATES[k];
9613 			}
9614 		}
9615 	}
9616 
9617 	if (info.sampleRates.size() == 0)
9618 	{
9619 		errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9620 		errorText_ = errorStream_.str();
9621 		error(RtAudioError::WARNING);
9622 	}
9623 	else
9624 	{
9625 		info.probed = true;
9626 		info.name = ainfo.name;
9627 	}
9628 
9629 	return info;
9630 }
9631 
probeDeviceOpen(unsigned int device,StreamMode mode,unsigned int channels,unsigned int firstChannel,unsigned int sampleRate,RtAudioFormat format,unsigned int * bufferSize,RtAudio::StreamOptions * options)9632 bool RtApiOss ::probeDeviceOpen(unsigned int device, StreamMode mode, unsigned int channels,
9633 								unsigned int firstChannel, unsigned int sampleRate,
9634 								RtAudioFormat format, unsigned int *bufferSize,
9635 								RtAudio::StreamOptions *options)
9636 {
9637 	int mixerfd = open("/dev/mixer", O_RDWR, 0);
9638 	if (mixerfd == -1)
9639 	{
9640 		errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9641 		return FAILURE;
9642 	}
9643 
9644 	oss_sysinfo sysinfo;
9645 	int result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
9646 	if (result == -1)
9647 	{
9648 		close(mixerfd);
9649 		errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9650 		return FAILURE;
9651 	}
9652 
9653 	unsigned nDevices = sysinfo.numaudios;
9654 	if (nDevices == 0)
9655 	{
9656 		// This should not happen because a check is made before this function is called.
9657 		close(mixerfd);
9658 		errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9659 		return FAILURE;
9660 	}
9661 
9662 	if (device >= nDevices)
9663 	{
9664 		// This should not happen because a check is made before this function is called.
9665 		close(mixerfd);
9666 		errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9667 		return FAILURE;
9668 	}
9669 
9670 	oss_audioinfo ainfo;
9671 	ainfo.dev = device;
9672 	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
9673 	close(mixerfd);
9674 	if (result == -1)
9675 	{
9676 		errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9677 		errorText_ = errorStream_.str();
9678 		return FAILURE;
9679 	}
9680 
9681 	// Check if device supports input or output
9682 	if ((mode == OUTPUT && !(ainfo.caps & PCM_CAP_OUTPUT)) ||
9683 		(mode == INPUT && !(ainfo.caps & PCM_CAP_INPUT)))
9684 	{
9685 		if (mode == OUTPUT)
9686 			errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9687 		else
9688 			errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9689 		errorText_ = errorStream_.str();
9690 		return FAILURE;
9691 	}
9692 
9693 	int flags = 0;
9694 	OssHandle *handle = (OssHandle *)stream_.apiHandle;
9695 	if (mode == OUTPUT)
9696 		flags |= O_WRONLY;
9697 	else
9698 	{  // mode == INPUT
9699 		if (stream_.mode == OUTPUT && stream_.device[0] == device)
9700 		{
9701 			// We just set the same device for playback ... close and reopen for duplex (OSS only).
9702 			close(handle->id[0]);
9703 			handle->id[0] = 0;
9704 			if (!(ainfo.caps & PCM_CAP_DUPLEX))
9705 			{
9706 				errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9707 				errorText_ = errorStream_.str();
9708 				return FAILURE;
9709 			}
9710 			// Check that the number previously set channels is the same.
9711 			if (stream_.nUserChannels[0] != channels)
9712 			{
9713 				errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9714 				errorText_ = errorStream_.str();
9715 				return FAILURE;
9716 			}
9717 			flags |= O_RDWR;
9718 		}
9719 		else
9720 			flags |= O_RDONLY;
9721 	}
9722 
9723 	// Set exclusive access if specified.
9724 	if (options && options->flags & RTAUDIO_HOG_DEVICE) flags |= O_EXCL;
9725 
9726 	// Try to open the device.
9727 	int fd;
9728 	fd = open(ainfo.devnode, flags, 0);
9729 	if (fd == -1)
9730 	{
9731 		if (errno == EBUSY)
9732 			errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9733 		else
9734 			errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9735 		errorText_ = errorStream_.str();
9736 		return FAILURE;
9737 	}
9738 
9739 	// For duplex operation, specifically set this mode (this doesn't seem to work).
9740 	/*
9741     if ( flags | O_RDWR ) {
9742     result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9743     if ( result == -1) {
9744     errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9745     errorText_ = errorStream_.str();
9746     return FAILURE;
9747     }
9748     }
9749   */
9750 
9751 	// Check the device channel support.
9752 	stream_.nUserChannels[mode] = channels;
9753 	if (ainfo.max_channels < (int)(channels + firstChannel))
9754 	{
9755 		close(fd);
9756 		errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9757 		errorText_ = errorStream_.str();
9758 		return FAILURE;
9759 	}
9760 
9761 	// Set the number of channels.
9762 	int deviceChannels = channels + firstChannel;
9763 	result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
9764 	if (result == -1 || deviceChannels < (int)(channels + firstChannel))
9765 	{
9766 		close(fd);
9767 		errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9768 		errorText_ = errorStream_.str();
9769 		return FAILURE;
9770 	}
9771 	stream_.nDeviceChannels[mode] = deviceChannels;
9772 
9773 	// Get the data format mask
9774 	int mask;
9775 	result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
9776 	if (result == -1)
9777 	{
9778 		close(fd);
9779 		errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9780 		errorText_ = errorStream_.str();
9781 		return FAILURE;
9782 	}
9783 
9784 	// Determine how to set the device format.
9785 	stream_.userFormat = format;
9786 	int deviceFormat = -1;
9787 	stream_.doByteSwap[mode] = false;
9788 	if (format == RTAUDIO_SINT8)
9789 	{
9790 		if (mask & AFMT_S8)
9791 		{
9792 			deviceFormat = AFMT_S8;
9793 			stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9794 		}
9795 	}
9796 	else if (format == RTAUDIO_SINT16)
9797 	{
9798 		if (mask & AFMT_S16_NE)
9799 		{
9800 			deviceFormat = AFMT_S16_NE;
9801 			stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9802 		}
9803 		else if (mask & AFMT_S16_OE)
9804 		{
9805 			deviceFormat = AFMT_S16_OE;
9806 			stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9807 			stream_.doByteSwap[mode] = true;
9808 		}
9809 	}
9810 	else if (format == RTAUDIO_SINT24)
9811 	{
9812 		if (mask & AFMT_S24_NE)
9813 		{
9814 			deviceFormat = AFMT_S24_NE;
9815 			stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9816 		}
9817 		else if (mask & AFMT_S24_OE)
9818 		{
9819 			deviceFormat = AFMT_S24_OE;
9820 			stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9821 			stream_.doByteSwap[mode] = true;
9822 		}
9823 	}
9824 	else if (format == RTAUDIO_SINT32)
9825 	{
9826 		if (mask & AFMT_S32_NE)
9827 		{
9828 			deviceFormat = AFMT_S32_NE;
9829 			stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9830 		}
9831 		else if (mask & AFMT_S32_OE)
9832 		{
9833 			deviceFormat = AFMT_S32_OE;
9834 			stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9835 			stream_.doByteSwap[mode] = true;
9836 		}
9837 	}
9838 
9839 	if (deviceFormat == -1)
9840 	{
9841 		// The user requested format is not natively supported by the device.
9842 		if (mask & AFMT_S16_NE)
9843 		{
9844 			deviceFormat = AFMT_S16_NE;
9845 			stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9846 		}
9847 		else if (mask & AFMT_S32_NE)
9848 		{
9849 			deviceFormat = AFMT_S32_NE;
9850 			stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9851 		}
9852 		else if (mask & AFMT_S24_NE)
9853 		{
9854 			deviceFormat = AFMT_S24_NE;
9855 			stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9856 		}
9857 		else if (mask & AFMT_S16_OE)
9858 		{
9859 			deviceFormat = AFMT_S16_OE;
9860 			stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9861 			stream_.doByteSwap[mode] = true;
9862 		}
9863 		else if (mask & AFMT_S32_OE)
9864 		{
9865 			deviceFormat = AFMT_S32_OE;
9866 			stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9867 			stream_.doByteSwap[mode] = true;
9868 		}
9869 		else if (mask & AFMT_S24_OE)
9870 		{
9871 			deviceFormat = AFMT_S24_OE;
9872 			stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9873 			stream_.doByteSwap[mode] = true;
9874 		}
9875 		else if (mask & AFMT_S8)
9876 		{
9877 			deviceFormat = AFMT_S8;
9878 			stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9879 		}
9880 	}
9881 
9882 	if (stream_.deviceFormat[mode] == 0)
9883 	{
9884 		// This really shouldn't happen ...
9885 		close(fd);
9886 		errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9887 		errorText_ = errorStream_.str();
9888 		return FAILURE;
9889 	}
9890 
9891 	// Set the data format.
9892 	int temp = deviceFormat;
9893 	result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
9894 	if (result == -1 || deviceFormat != temp)
9895 	{
9896 		close(fd);
9897 		errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9898 		errorText_ = errorStream_.str();
9899 		return FAILURE;
9900 	}
9901 
9902 	// Attempt to set the buffer size.  According to OSS, the minimum
9903 	// number of buffers is two.  The supposed minimum buffer size is 16
9904 	// bytes, so that will be our lower bound.  The argument to this
9905 	// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9906 	// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9907 	// We'll check the actual value used near the end of the setup
9908 	// procedure.
9909 	int ossBufferBytes = *bufferSize * formatBytes(stream_.deviceFormat[mode]) * deviceChannels;
9910 	if (ossBufferBytes < 16) ossBufferBytes = 16;
9911 	int buffers = 0;
9912 	if (options) buffers = options->numberOfBuffers;
9913 	if (options && options->flags & RTAUDIO_MINIMIZE_LATENCY) buffers = 2;
9914 	if (buffers < 2) buffers = 3;
9915 	temp = ((int)buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
9916 	result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
9917 	if (result == -1)
9918 	{
9919 		close(fd);
9920 		errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9921 		errorText_ = errorStream_.str();
9922 		return FAILURE;
9923 	}
9924 	stream_.nBuffers = buffers;
9925 
9926 	// Save buffer size (in sample frames).
9927 	*bufferSize = ossBufferBytes / (formatBytes(stream_.deviceFormat[mode]) * deviceChannels);
9928 	stream_.bufferSize = *bufferSize;
9929 
9930 	// Set the sample rate.
9931 	int srate = sampleRate;
9932 	result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
9933 	if (result == -1)
9934 	{
9935 		close(fd);
9936 		errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9937 		errorText_ = errorStream_.str();
9938 		return FAILURE;
9939 	}
9940 
9941 	// Verify the sample rate setup worked.
9942 	if (abs(srate - sampleRate) > 100)
9943 	{
9944 		close(fd);
9945 		errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9946 		errorText_ = errorStream_.str();
9947 		return FAILURE;
9948 	}
9949 	stream_.sampleRate = sampleRate;
9950 
9951 	if (mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device)
9952 	{
9953 		// We're doing duplex setup here.
9954 		stream_.deviceFormat[0] = stream_.deviceFormat[1];
9955 		stream_.nDeviceChannels[0] = deviceChannels;
9956 	}
9957 
9958 	// Set interleaving parameters.
9959 	stream_.userInterleaved = true;
9960 	stream_.deviceInterleaved[mode] = true;
9961 	if (options && options->flags & RTAUDIO_NONINTERLEAVED)
9962 		stream_.userInterleaved = false;
9963 
9964 	// Set flags for buffer conversion
9965 	stream_.doConvertBuffer[mode] = false;
9966 	if (stream_.userFormat != stream_.deviceFormat[mode])
9967 		stream_.doConvertBuffer[mode] = true;
9968 	if (stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode])
9969 		stream_.doConvertBuffer[mode] = true;
9970 	if (stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9971 		stream_.nUserChannels[mode] > 1)
9972 		stream_.doConvertBuffer[mode] = true;
9973 
9974 	// Allocate the stream handles if necessary and then save.
9975 	if (stream_.apiHandle == 0)
9976 	{
9977 		try
9978 		{
9979 			handle = new OssHandle;
9980 		}
9981 		catch (std::bad_alloc &)
9982 		{
9983 			errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9984 			goto error;
9985 		}
9986 
9987 		if (pthread_cond_init(&handle->runnable, NULL))
9988 		{
9989 			errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9990 			goto error;
9991 		}
9992 
9993 		stream_.apiHandle = (void *)handle;
9994 	}
9995 	else
9996 	{
9997 		handle = (OssHandle *)stream_.apiHandle;
9998 	}
9999 	handle->id[mode] = fd;
10000 
10001 	// Allocate necessary internal buffers.
10002 	unsigned long bufferBytes;
10003 	bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes(stream_.userFormat);
10004 	stream_.userBuffer[mode] = (char *)calloc(bufferBytes, 1);
10005 	if (stream_.userBuffer[mode] == NULL)
10006 	{
10007 		errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
10008 		goto error;
10009 	}
10010 
10011 	if (stream_.doConvertBuffer[mode])
10012 	{
10013 		bool makeBuffer = true;
10014 		bufferBytes = stream_.nDeviceChannels[mode] * formatBytes(stream_.deviceFormat[mode]);
10015 		if (mode == INPUT)
10016 		{
10017 			if (stream_.mode == OUTPUT && stream_.deviceBuffer)
10018 			{
10019 				unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
10020 				if (bufferBytes <= bytesOut) makeBuffer = false;
10021 			}
10022 		}
10023 
10024 		if (makeBuffer)
10025 		{
10026 			bufferBytes *= *bufferSize;
10027 			if (stream_.deviceBuffer) free(stream_.deviceBuffer);
10028 			stream_.deviceBuffer = (char *)calloc(bufferBytes, 1);
10029 			if (stream_.deviceBuffer == NULL)
10030 			{
10031 				errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
10032 				goto error;
10033 			}
10034 		}
10035 	}
10036 
10037 	stream_.device[mode] = device;
10038 	stream_.state = STREAM_STOPPED;
10039 
10040 	// Setup the buffer conversion information structure.
10041 	if (stream_.doConvertBuffer[mode]) setConvertInfo(mode, firstChannel);
10042 
10043 	// Setup thread if necessary.
10044 	if (stream_.mode == OUTPUT && mode == INPUT)
10045 	{
10046 		// We had already set up an output stream.
10047 		stream_.mode = DUPLEX;
10048 		if (stream_.device[0] == device) handle->id[0] = fd;
10049 	}
10050 	else
10051 	{
10052 		stream_.mode = mode;
10053 
10054 		// Setup callback thread.
10055 		stream_.callbackInfo.object = (void *)this;
10056 
10057 		// Set the thread attributes for joinable and realtime scheduling
10058 		// priority.  The higher priority will only take affect if the
10059 		// program is run as root or suid.
10060 		pthread_attr_t attr;
10061 		pthread_attr_init(&attr);
10062 		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
10063 #ifdef SCHED_RR  // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
10064 		if (options && options->flags & RTAUDIO_SCHEDULE_REALTIME)
10065 		{
10066 			struct sched_param param;
10067 			int priority = options->priority;
10068 			int min = sched_get_priority_min(SCHED_RR);
10069 			int max = sched_get_priority_max(SCHED_RR);
10070 			if (priority < min)
10071 				priority = min;
10072 			else if (priority > max)
10073 				priority = max;
10074 			param.sched_priority = priority;
10075 			pthread_attr_setschedparam(&attr, &param);
10076 			pthread_attr_setschedpolicy(&attr, SCHED_RR);
10077 		}
10078 		else
10079 			pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
10080 #else
10081 		pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
10082 #endif
10083 
10084 		stream_.callbackInfo.isRunning = true;
10085 		result = pthread_create(&stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo);
10086 		pthread_attr_destroy(&attr);
10087 		if (result)
10088 		{
10089 			stream_.callbackInfo.isRunning = false;
10090 			errorText_ = "RtApiOss::error creating callback thread!";
10091 			goto error;
10092 		}
10093 	}
10094 
10095 	return SUCCESS;
10096 
10097 error:
10098 	if (handle)
10099 	{
10100 		pthread_cond_destroy(&handle->runnable);
10101 		if (handle->id[0]) close(handle->id[0]);
10102 		if (handle->id[1]) close(handle->id[1]);
10103 		delete handle;
10104 		stream_.apiHandle = 0;
10105 	}
10106 
10107 	for (int i = 0; i < 2; i++)
10108 	{
10109 		if (stream_.userBuffer[i])
10110 		{
10111 			free(stream_.userBuffer[i]);
10112 			stream_.userBuffer[i] = 0;
10113 		}
10114 	}
10115 
10116 	if (stream_.deviceBuffer)
10117 	{
10118 		free(stream_.deviceBuffer);
10119 		stream_.deviceBuffer = 0;
10120 	}
10121 
10122 	return FAILURE;
10123 }
10124 
closeStream()10125 void RtApiOss ::closeStream()
10126 {
10127 	if (stream_.state == STREAM_CLOSED)
10128 	{
10129 		errorText_ = "RtApiOss::closeStream(): no open stream to close!";
10130 		error(RtAudioError::WARNING);
10131 		return;
10132 	}
10133 
10134 	OssHandle *handle = (OssHandle *)stream_.apiHandle;
10135 	stream_.callbackInfo.isRunning = false;
10136 	MUTEX_LOCK(&stream_.mutex);
10137 	if (stream_.state == STREAM_STOPPED)
10138 		pthread_cond_signal(&handle->runnable);
10139 	MUTEX_UNLOCK(&stream_.mutex);
10140 	pthread_join(stream_.callbackInfo.thread, NULL);
10141 
10142 	if (stream_.state == STREAM_RUNNING)
10143 	{
10144 		if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
10145 			ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
10146 		else
10147 			ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
10148 		stream_.state = STREAM_STOPPED;
10149 	}
10150 
10151 	if (handle)
10152 	{
10153 		pthread_cond_destroy(&handle->runnable);
10154 		if (handle->id[0]) close(handle->id[0]);
10155 		if (handle->id[1]) close(handle->id[1]);
10156 		delete handle;
10157 		stream_.apiHandle = 0;
10158 	}
10159 
10160 	for (int i = 0; i < 2; i++)
10161 	{
10162 		if (stream_.userBuffer[i])
10163 		{
10164 			free(stream_.userBuffer[i]);
10165 			stream_.userBuffer[i] = 0;
10166 		}
10167 	}
10168 
10169 	if (stream_.deviceBuffer)
10170 	{
10171 		free(stream_.deviceBuffer);
10172 		stream_.deviceBuffer = 0;
10173 	}
10174 
10175 	stream_.mode = UNINITIALIZED;
10176 	stream_.state = STREAM_CLOSED;
10177 }
10178 
startStream()10179 void RtApiOss ::startStream()
10180 {
10181 	verifyStream();
10182 	if (stream_.state == STREAM_RUNNING)
10183 	{
10184 		errorText_ = "RtApiOss::startStream(): the stream is already running!";
10185 		error(RtAudioError::WARNING);
10186 		return;
10187 	}
10188 
10189 	MUTEX_LOCK(&stream_.mutex);
10190 
10191 	stream_.state = STREAM_RUNNING;
10192 
10193 	// No need to do anything else here ... OSS automatically starts
10194 	// when fed samples.
10195 
10196 	MUTEX_UNLOCK(&stream_.mutex);
10197 
10198 	OssHandle *handle = (OssHandle *)stream_.apiHandle;
10199 	pthread_cond_signal(&handle->runnable);
10200 }
10201 
void RtApiOss ::stopStream()
{
	// Stop the stream after draining queued output with silence, so the
	// last buffers handed to the device are actually heard.
	verifyStream();
	if (stream_.state == STREAM_STOPPED)
	{
		errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
		error(RtAudioError::WARNING);
		return;
	}

	MUTEX_LOCK(&stream_.mutex);

	// The state might change while waiting on a mutex.
	if (stream_.state == STREAM_STOPPED)
	{
		MUTEX_UNLOCK(&stream_.mutex);
		return;
	}

	int result = 0;
	OssHandle *handle = (OssHandle *)stream_.apiHandle;
	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// Flush the output with zeros a few times.
		char *buffer;
		int samples;
		RtAudioFormat format;

		// Pick whichever buffer (device-side or user-side) is the one
		// actually fed to write() for this stream configuration.
		if (stream_.doConvertBuffer[0])
		{
			buffer = stream_.deviceBuffer;
			samples = stream_.bufferSize * stream_.nDeviceChannels[0];
			format = stream_.deviceFormat[0];
		}
		else
		{
			buffer = stream_.userBuffer[0];
			samples = stream_.bufferSize * stream_.nUserChannels[0];
			format = stream_.userFormat;
		}

		// Write one buffer of silence per device fragment (plus one) to
		// push any pending audio through before halting.
		memset(buffer, 0, samples * formatBytes(format));
		for (unsigned int i = 0; i < stream_.nBuffers + 1; i++)
		{
			result = write(handle->id[0], buffer, samples * formatBytes(format));
			if (result == -1)
			{
				errorText_ = "RtApiOss::stopStream: audio write error.";
				error(RtAudioError::WARNING);
			}
		}

		result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1)
		{
			errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
			errorText_ = errorStream_.str();
			goto unlock;
		}
		// Force the duplex trigger sequence to run again on the next start.
		handle->triggered = false;
	}

	// Halt the input side too, unless duplex mode shares one descriptor
	// (in which case the halt above already covered it).
	if (stream_.mode == INPUT || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1]))
	{
		result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1)
		{
			errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
			errorText_ = errorStream_.str();
			goto unlock;
		}
	}

unlock:
	stream_.state = STREAM_STOPPED;
	MUTEX_UNLOCK(&stream_.mutex);

	// Report any ioctl failure only after the mutex has been released.
	if (result != -1) return;
	error(RtAudioError::SYSTEM_ERROR);
}
10282 
abortStream()10283 void RtApiOss ::abortStream()
10284 {
10285 	verifyStream();
10286 	if (stream_.state == STREAM_STOPPED)
10287 	{
10288 		errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
10289 		error(RtAudioError::WARNING);
10290 		return;
10291 	}
10292 
10293 	MUTEX_LOCK(&stream_.mutex);
10294 
10295 	// The state might change while waiting on a mutex.
10296 	if (stream_.state == STREAM_STOPPED)
10297 	{
10298 		MUTEX_UNLOCK(&stream_.mutex);
10299 		return;
10300 	}
10301 
10302 	int result = 0;
10303 	OssHandle *handle = (OssHandle *)stream_.apiHandle;
10304 	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
10305 	{
10306 		result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
10307 		if (result == -1)
10308 		{
10309 			errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
10310 			errorText_ = errorStream_.str();
10311 			goto unlock;
10312 		}
10313 		handle->triggered = false;
10314 	}
10315 
10316 	if (stream_.mode == INPUT || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1]))
10317 	{
10318 		result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
10319 		if (result == -1)
10320 		{
10321 			errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
10322 			errorText_ = errorStream_.str();
10323 			goto unlock;
10324 		}
10325 	}
10326 
10327 unlock:
10328 	stream_.state = STREAM_STOPPED;
10329 	MUTEX_UNLOCK(&stream_.mutex);
10330 
10331 	if (result != -1) return;
10332 	error(RtAudioError::SYSTEM_ERROR);
10333 }
10334 
void RtApiOss ::callbackEvent()
{
	// One iteration of the callback loop: invoke the user callback, then
	// write output to and/or read input from the OSS device descriptors.
	OssHandle *handle = (OssHandle *)stream_.apiHandle;
	if (stream_.state == STREAM_STOPPED)
	{
		// Park here until startStream() (or closeStream()) signals the
		// condition; re-check the state on wakeup before proceeding.
		MUTEX_LOCK(&stream_.mutex);
		pthread_cond_wait(&handle->runnable, &stream_.mutex);
		if (stream_.state != STREAM_RUNNING)
		{
			MUTEX_UNLOCK(&stream_.mutex);
			return;
		}
		MUTEX_UNLOCK(&stream_.mutex);
	}

	if (stream_.state == STREAM_CLOSED)
	{
		errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(RtAudioError::WARNING);
		return;
	}

	// Invoke user callback to get fresh output data.
	int doStopStream = 0;
	RtAudioCallback callback = (RtAudioCallback)stream_.callbackInfo.callback;
	double streamTime = getStreamTime();
	RtAudioStreamStatus status = 0;
	// Report (and clear) any under/overflow flags raised by earlier I/O.
	if (stream_.mode != INPUT && handle->xrun[0] == true)
	{
		status |= RTAUDIO_OUTPUT_UNDERFLOW;
		handle->xrun[0] = false;
	}
	if (stream_.mode != OUTPUT && handle->xrun[1] == true)
	{
		status |= RTAUDIO_INPUT_OVERFLOW;
		handle->xrun[1] = false;
	}
	doStopStream = callback(stream_.userBuffer[0], stream_.userBuffer[1],
							stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData);
	// A callback return value of 2 requests an immediate abort (no drain).
	if (doStopStream == 2)
	{
		this->abortStream();
		return;
	}

	MUTEX_LOCK(&stream_.mutex);

	// The state might change while waiting on a mutex.
	if (stream_.state == STREAM_STOPPED) goto unlock;

	int result;
	char *buffer;
	int samples;
	RtAudioFormat format;

	if (stream_.mode == OUTPUT || stream_.mode == DUPLEX)
	{
		// Setup parameters and do buffer conversion if necessary.
		if (stream_.doConvertBuffer[0])
		{
			buffer = stream_.deviceBuffer;
			convertBuffer(buffer, stream_.userBuffer[0], stream_.convertInfo[0]);
			samples = stream_.bufferSize * stream_.nDeviceChannels[0];
			format = stream_.deviceFormat[0];
		}
		else
		{
			buffer = stream_.userBuffer[0];
			samples = stream_.bufferSize * stream_.nUserChannels[0];
			format = stream_.userFormat;
		}

		// Do byte swapping if necessary.
		if (stream_.doByteSwap[0])
			byteSwapBuffer(buffer, samples, format);

		if (stream_.mode == DUPLEX && handle->triggered == false)
		{
			// First duplex pass on this device: pre-load one output buffer
			// with triggering disabled, then enable input and output
			// together so both directions start in sync.
			int trig = 0;
			ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			result = write(handle->id[0], buffer, samples * formatBytes(format));
			trig = PCM_ENABLE_INPUT | PCM_ENABLE_OUTPUT;
			ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			handle->triggered = true;
		}
		else
			// Write samples to device.
			result = write(handle->id[0], buffer, samples * formatBytes(format));

		if (result == -1)
		{
			// We'll assume this is an underrun, though there isn't a
			// specific means for determining that.
			handle->xrun[0] = true;
			errorText_ = "RtApiOss::callbackEvent: audio write error.";
			error(RtAudioError::WARNING);
			// Continue on to input section.
		}
	}

	if (stream_.mode == INPUT || stream_.mode == DUPLEX)
	{
		// Setup parameters.
		if (stream_.doConvertBuffer[1])
		{
			buffer = stream_.deviceBuffer;
			samples = stream_.bufferSize * stream_.nDeviceChannels[1];
			format = stream_.deviceFormat[1];
		}
		else
		{
			buffer = stream_.userBuffer[1];
			samples = stream_.bufferSize * stream_.nUserChannels[1];
			format = stream_.userFormat;
		}

		// Read samples from device.
		result = read(handle->id[1], buffer, samples * formatBytes(format));

		if (result == -1)
		{
			// We'll assume this is an overrun, though there isn't a
			// specific means for determining that.
			handle->xrun[1] = true;
			errorText_ = "RtApiOss::callbackEvent: audio read error.";
			error(RtAudioError::WARNING);
			goto unlock;
		}

		// Do byte swapping if necessary.
		if (stream_.doByteSwap[1])
			byteSwapBuffer(buffer, samples, format);

		// Do buffer conversion if necessary.
		if (stream_.doConvertBuffer[1])
			convertBuffer(stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1]);
	}

unlock:
	MUTEX_UNLOCK(&stream_.mutex);

	RtApi::tickStreamTime();
	// A callback return value of 1 requests a drained stop.
	if (doStopStream == 1) this->stopStream();
}
10479 
ossCallbackHandler(void * ptr)10480 static void *ossCallbackHandler(void *ptr)
10481 {
10482 	CallbackInfo *info = (CallbackInfo *)ptr;
10483 	RtApiOss *object = (RtApiOss *)info->object;
10484 	bool *isRunning = &info->isRunning;
10485 
10486 	while (*isRunning == true)
10487 	{
10488 		pthread_testcancel();
10489 		object->callbackEvent();
10490 	}
10491 
10492 	pthread_exit(NULL);
10493 }
10494 
10495 //******************** End of __LINUX_OSS__ *********************//
10496 #endif
10497 
10498 // *************************************************** //
10499 //
10500 // Protected common (OS-independent) RtAudio methods.
10501 //
10502 // *************************************************** //
10503 
10504 // This method can be modified to control the behavior of error
10505 // message printing.
// This method can be modified to control the behavior of error
// message printing.
//
// Central error dispatch: routes errorText_ to the user's error callback
// if one was registered; otherwise prints warnings to stderr and throws
// RtAudioError for anything stronger than a warning.
void RtApi ::error(RtAudioError::Type type)
{
	errorStream_.str("");  // clear the ostringstream

	RtAudioErrorCallback errorCallback = (RtAudioErrorCallback)stream_.callbackInfo.errorCallback;
	if (errorCallback)
	{
		// abortStream() can generate new error messages. Ignore them. Just keep original one.

		// firstErrorOccurred_ guards against re-entrant calls from
		// abortStream() below clobbering the original report.
		if (firstErrorOccurred_)
			return;

		firstErrorOccurred_ = true;
		// Copy the message now: abortStream() below may overwrite errorText_.
		const std::string errorMessage = errorText_;

		// A non-warning error on a live stream shuts the stream down before
		// the callback is notified.
		if (type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED)
		{
			stream_.callbackInfo.isRunning = false;  // exit from the thread
			abortStream();
		}

		errorCallback(type, errorMessage);
		firstErrorOccurred_ = false;
		return;
	}

	if (type == RtAudioError::WARNING && showWarnings_ == true)
		std::cerr << '\n'
				  << errorText_ << "\n\n";
	else if (type != RtAudioError::WARNING)
		throw(RtAudioError(errorText_, type));
}
10538 
verifyStream()10539 void RtApi ::verifyStream()
10540 {
10541 	if (stream_.state == STREAM_CLOSED)
10542 	{
10543 		errorText_ = "RtApi:: a stream is not open!";
10544 		error(RtAudioError::INVALID_USE);
10545 	}
10546 }
10547 
clearStreamInfo()10548 void RtApi ::clearStreamInfo()
10549 {
10550 	stream_.mode = UNINITIALIZED;
10551 	stream_.state = STREAM_CLOSED;
10552 	stream_.sampleRate = 0;
10553 	stream_.bufferSize = 0;
10554 	stream_.nBuffers = 0;
10555 	stream_.userFormat = 0;
10556 	stream_.userInterleaved = true;
10557 	stream_.streamTime = 0.0;
10558 	stream_.apiHandle = 0;
10559 	stream_.deviceBuffer = 0;
10560 	stream_.callbackInfo.callback = 0;
10561 	stream_.callbackInfo.userData = 0;
10562 	stream_.callbackInfo.isRunning = false;
10563 	stream_.callbackInfo.errorCallback = 0;
10564 	for (int i = 0; i < 2; i++)
10565 	{
10566 		stream_.device[i] = 11111;
10567 		stream_.doConvertBuffer[i] = false;
10568 		stream_.deviceInterleaved[i] = true;
10569 		stream_.doByteSwap[i] = false;
10570 		stream_.nUserChannels[i] = 0;
10571 		stream_.nDeviceChannels[i] = 0;
10572 		stream_.channelOffset[i] = 0;
10573 		stream_.deviceFormat[i] = 0;
10574 		stream_.latency[i] = 0;
10575 		stream_.userBuffer[i] = 0;
10576 		stream_.convertInfo[i].channels = 0;
10577 		stream_.convertInfo[i].inJump = 0;
10578 		stream_.convertInfo[i].outJump = 0;
10579 		stream_.convertInfo[i].inFormat = 0;
10580 		stream_.convertInfo[i].outFormat = 0;
10581 		stream_.convertInfo[i].inOffset.clear();
10582 		stream_.convertInfo[i].outOffset.clear();
10583 	}
10584 }
10585 
formatBytes(RtAudioFormat format)10586 unsigned int RtApi ::formatBytes(RtAudioFormat format)
10587 {
10588 	if (format == RTAUDIO_SINT16)
10589 		return 2;
10590 	else if (format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32)
10591 		return 4;
10592 	else if (format == RTAUDIO_FLOAT64)
10593 		return 8;
10594 	else if (format == RTAUDIO_SINT24)
10595 		return 3;
10596 	else if (format == RTAUDIO_SINT8)
10597 		return 1;
10598 
10599 	errorText_ = "RtApi::formatBytes: undefined format.";
10600 	error(RtAudioError::WARNING);
10601 
10602 	return 0;
10603 }
10604 
// Precompute the per-channel offsets and jumps used by convertBuffer()
// for one direction: OUTPUT converts user buffer -> device buffer, INPUT
// converts device buffer -> user buffer.
void RtApi ::setConvertInfo(StreamMode mode, unsigned int firstChannel)
{
	if (mode == INPUT)
	{  // convert device to user buffer
		stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
		stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
		stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
		stream_.convertInfo[mode].outFormat = stream_.userFormat;
	}
	else
	{  // convert user to device buffer
		stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
		stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
		stream_.convertInfo[mode].inFormat = stream_.userFormat;
		stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
	}

	// Only the smaller of the two channel counts is actually converted.
	if (stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump)
		stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
	else
		stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

	// Set up the interleave/deinterleave offsets.
	if (stream_.deviceInterleaved[mode] != stream_.userInterleaved)
	{
		// One side is interleaved, the other non-interleaved
		// (channel-contiguous planes of bufferSize frames each).
		if ((mode == OUTPUT && stream_.deviceInterleaved[mode]) ||
			(mode == INPUT && stream_.userInterleaved))
		{
			// Source is non-interleaved: channel k starts k*bufferSize in
			// and advances one sample per frame (inJump forced to 1).
			for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
			{
				stream_.convertInfo[mode].inOffset.push_back(k * stream_.bufferSize);
				stream_.convertInfo[mode].outOffset.push_back(k);
				stream_.convertInfo[mode].inJump = 1;
			}
		}
		else
		{
			// Destination is non-interleaved: mirror image of the case above.
			for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
			{
				stream_.convertInfo[mode].inOffset.push_back(k);
				stream_.convertInfo[mode].outOffset.push_back(k * stream_.bufferSize);
				stream_.convertInfo[mode].outJump = 1;
			}
		}
	}
	else
	{  // no (de)interleaving
		if (stream_.userInterleaved)
		{
			// Both sides interleaved: channels sit side by side in a frame.
			for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
			{
				stream_.convertInfo[mode].inOffset.push_back(k);
				stream_.convertInfo[mode].outOffset.push_back(k);
			}
		}
		else
		{
			// Both sides non-interleaved: per-channel planes, single-sample
			// jumps on each side.
			for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
			{
				stream_.convertInfo[mode].inOffset.push_back(k * stream_.bufferSize);
				stream_.convertInfo[mode].outOffset.push_back(k * stream_.bufferSize);
				stream_.convertInfo[mode].inJump = 1;
				stream_.convertInfo[mode].outJump = 1;
			}
		}
	}

	// Add channel offset.
	if (firstChannel > 0)
	{
		// Shift the device-side offsets so conversion starts at the
		// requested first channel: by whole channels when the device side
		// is interleaved, by whole bufferSize planes when it is not.
		if (stream_.deviceInterleaved[mode])
		{
			if (mode == OUTPUT)
			{
				for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
					stream_.convertInfo[mode].outOffset[k] += firstChannel;
			}
			else
			{
				for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
					stream_.convertInfo[mode].inOffset[k] += firstChannel;
			}
		}
		else
		{
			if (mode == OUTPUT)
			{
				for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
					stream_.convertInfo[mode].outOffset[k] += (firstChannel * stream_.bufferSize);
			}
			else
			{
				for (int k = 0; k < stream_.convertInfo[mode].channels; k++)
					stream_.convertInfo[mode].inOffset[k] += (firstChannel * stream_.bufferSize);
			}
		}
	}
}
10703 
convertBuffer(char * outBuffer,char * inBuffer,ConvertInfo & info)10704 void RtApi ::convertBuffer(char *outBuffer, char *inBuffer, ConvertInfo &info)
10705 {
10706 	// This function does format conversion, input/output channel compensation, and
10707 	// data interleaving/deinterleaving.  24-bit integers are assumed to occupy
10708 	// the lower three bytes of a 32-bit integer.
10709 
10710 	// Clear our device buffer when in/out duplex device channels are different
10711 	if (outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10712 		(stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1]))
10713 		memset(outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes(info.outFormat));
10714 
10715 	int j;
10716 	if (info.outFormat == RTAUDIO_FLOAT64)
10717 	{
10718 		Float64 scale;
10719 		Float64 *out = (Float64 *)outBuffer;
10720 
10721 		if (info.inFormat == RTAUDIO_SINT8)
10722 		{
10723 			signed char *in = (signed char *)inBuffer;
10724 			scale = 1.0 / 127.5;
10725 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10726 			{
10727 				for (j = 0; j < info.channels; j++)
10728 				{
10729 					out[info.outOffset[j]] = (Float64)in[info.inOffset[j]];
10730 					out[info.outOffset[j]] += 0.5;
10731 					out[info.outOffset[j]] *= scale;
10732 				}
10733 				in += info.inJump;
10734 				out += info.outJump;
10735 			}
10736 		}
10737 		else if (info.inFormat == RTAUDIO_SINT16)
10738 		{
10739 			Int16 *in = (Int16 *)inBuffer;
10740 			scale = 1.0 / 32767.5;
10741 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10742 			{
10743 				for (j = 0; j < info.channels; j++)
10744 				{
10745 					out[info.outOffset[j]] = (Float64)in[info.inOffset[j]];
10746 					out[info.outOffset[j]] += 0.5;
10747 					out[info.outOffset[j]] *= scale;
10748 				}
10749 				in += info.inJump;
10750 				out += info.outJump;
10751 			}
10752 		}
10753 		else if (info.inFormat == RTAUDIO_SINT24)
10754 		{
10755 			Int24 *in = (Int24 *)inBuffer;
10756 			scale = 1.0 / 8388607.5;
10757 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10758 			{
10759 				for (j = 0; j < info.channels; j++)
10760 				{
10761 					out[info.outOffset[j]] = (Float64)(in[info.inOffset[j]].asInt());
10762 					out[info.outOffset[j]] += 0.5;
10763 					out[info.outOffset[j]] *= scale;
10764 				}
10765 				in += info.inJump;
10766 				out += info.outJump;
10767 			}
10768 		}
10769 		else if (info.inFormat == RTAUDIO_SINT32)
10770 		{
10771 			Int32 *in = (Int32 *)inBuffer;
10772 			scale = 1.0 / 2147483647.5;
10773 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10774 			{
10775 				for (j = 0; j < info.channels; j++)
10776 				{
10777 					out[info.outOffset[j]] = (Float64)in[info.inOffset[j]];
10778 					out[info.outOffset[j]] += 0.5;
10779 					out[info.outOffset[j]] *= scale;
10780 				}
10781 				in += info.inJump;
10782 				out += info.outJump;
10783 			}
10784 		}
10785 		else if (info.inFormat == RTAUDIO_FLOAT32)
10786 		{
10787 			Float32 *in = (Float32 *)inBuffer;
10788 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10789 			{
10790 				for (j = 0; j < info.channels; j++)
10791 				{
10792 					out[info.outOffset[j]] = (Float64)in[info.inOffset[j]];
10793 				}
10794 				in += info.inJump;
10795 				out += info.outJump;
10796 			}
10797 		}
10798 		else if (info.inFormat == RTAUDIO_FLOAT64)
10799 		{
10800 			// Channel compensation and/or (de)interleaving only.
10801 			Float64 *in = (Float64 *)inBuffer;
10802 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10803 			{
10804 				for (j = 0; j < info.channels; j++)
10805 				{
10806 					out[info.outOffset[j]] = in[info.inOffset[j]];
10807 				}
10808 				in += info.inJump;
10809 				out += info.outJump;
10810 			}
10811 		}
10812 	}
10813 	else if (info.outFormat == RTAUDIO_FLOAT32)
10814 	{
10815 		Float32 scale;
10816 		Float32 *out = (Float32 *)outBuffer;
10817 
10818 		if (info.inFormat == RTAUDIO_SINT8)
10819 		{
10820 			signed char *in = (signed char *)inBuffer;
10821 			scale = (Float32)(1.0 / 127.5);
10822 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10823 			{
10824 				for (j = 0; j < info.channels; j++)
10825 				{
10826 					out[info.outOffset[j]] = (Float32)in[info.inOffset[j]];
10827 					out[info.outOffset[j]] += 0.5;
10828 					out[info.outOffset[j]] *= scale;
10829 				}
10830 				in += info.inJump;
10831 				out += info.outJump;
10832 			}
10833 		}
10834 		else if (info.inFormat == RTAUDIO_SINT16)
10835 		{
10836 			Int16 *in = (Int16 *)inBuffer;
10837 			scale = (Float32)(1.0 / 32767.5);
10838 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10839 			{
10840 				for (j = 0; j < info.channels; j++)
10841 				{
10842 					out[info.outOffset[j]] = (Float32)in[info.inOffset[j]];
10843 					out[info.outOffset[j]] += 0.5;
10844 					out[info.outOffset[j]] *= scale;
10845 				}
10846 				in += info.inJump;
10847 				out += info.outJump;
10848 			}
10849 		}
10850 		else if (info.inFormat == RTAUDIO_SINT24)
10851 		{
10852 			Int24 *in = (Int24 *)inBuffer;
10853 			scale = (Float32)(1.0 / 8388607.5);
10854 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10855 			{
10856 				for (j = 0; j < info.channels; j++)
10857 				{
10858 					out[info.outOffset[j]] = (Float32)(in[info.inOffset[j]].asInt());
10859 					out[info.outOffset[j]] += 0.5;
10860 					out[info.outOffset[j]] *= scale;
10861 				}
10862 				in += info.inJump;
10863 				out += info.outJump;
10864 			}
10865 		}
10866 		else if (info.inFormat == RTAUDIO_SINT32)
10867 		{
10868 			Int32 *in = (Int32 *)inBuffer;
10869 			scale = (Float32)(1.0 / 2147483647.5);
10870 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10871 			{
10872 				for (j = 0; j < info.channels; j++)
10873 				{
10874 					out[info.outOffset[j]] = (Float32)in[info.inOffset[j]];
10875 					out[info.outOffset[j]] += 0.5;
10876 					out[info.outOffset[j]] *= scale;
10877 				}
10878 				in += info.inJump;
10879 				out += info.outJump;
10880 			}
10881 		}
10882 		else if (info.inFormat == RTAUDIO_FLOAT32)
10883 		{
10884 			// Channel compensation and/or (de)interleaving only.
10885 			Float32 *in = (Float32 *)inBuffer;
10886 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10887 			{
10888 				for (j = 0; j < info.channels; j++)
10889 				{
10890 					out[info.outOffset[j]] = in[info.inOffset[j]];
10891 				}
10892 				in += info.inJump;
10893 				out += info.outJump;
10894 			}
10895 		}
10896 		else if (info.inFormat == RTAUDIO_FLOAT64)
10897 		{
10898 			Float64 *in = (Float64 *)inBuffer;
10899 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10900 			{
10901 				for (j = 0; j < info.channels; j++)
10902 				{
10903 					out[info.outOffset[j]] = (Float32)in[info.inOffset[j]];
10904 				}
10905 				in += info.inJump;
10906 				out += info.outJump;
10907 			}
10908 		}
10909 	}
10910 	else if (info.outFormat == RTAUDIO_SINT32)
10911 	{
10912 		Int32 *out = (Int32 *)outBuffer;
10913 		if (info.inFormat == RTAUDIO_SINT8)
10914 		{
10915 			signed char *in = (signed char *)inBuffer;
10916 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10917 			{
10918 				for (j = 0; j < info.channels; j++)
10919 				{
10920 					out[info.outOffset[j]] = (Int32)in[info.inOffset[j]];
10921 					out[info.outOffset[j]] <<= 24;
10922 				}
10923 				in += info.inJump;
10924 				out += info.outJump;
10925 			}
10926 		}
10927 		else if (info.inFormat == RTAUDIO_SINT16)
10928 		{
10929 			Int16 *in = (Int16 *)inBuffer;
10930 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10931 			{
10932 				for (j = 0; j < info.channels; j++)
10933 				{
10934 					out[info.outOffset[j]] = (Int32)in[info.inOffset[j]];
10935 					out[info.outOffset[j]] <<= 16;
10936 				}
10937 				in += info.inJump;
10938 				out += info.outJump;
10939 			}
10940 		}
10941 		else if (info.inFormat == RTAUDIO_SINT24)
10942 		{
10943 			Int24 *in = (Int24 *)inBuffer;
10944 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10945 			{
10946 				for (j = 0; j < info.channels; j++)
10947 				{
10948 					out[info.outOffset[j]] = (Int32)in[info.inOffset[j]].asInt();
10949 					out[info.outOffset[j]] <<= 8;
10950 				}
10951 				in += info.inJump;
10952 				out += info.outJump;
10953 			}
10954 		}
10955 		else if (info.inFormat == RTAUDIO_SINT32)
10956 		{
10957 			// Channel compensation and/or (de)interleaving only.
10958 			Int32 *in = (Int32 *)inBuffer;
10959 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10960 			{
10961 				for (j = 0; j < info.channels; j++)
10962 				{
10963 					out[info.outOffset[j]] = in[info.inOffset[j]];
10964 				}
10965 				in += info.inJump;
10966 				out += info.outJump;
10967 			}
10968 		}
10969 		else if (info.inFormat == RTAUDIO_FLOAT32)
10970 		{
10971 			Float32 *in = (Float32 *)inBuffer;
10972 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10973 			{
10974 				for (j = 0; j < info.channels; j++)
10975 				{
10976 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] * 2147483647.5 - 0.5);
10977 				}
10978 				in += info.inJump;
10979 				out += info.outJump;
10980 			}
10981 		}
10982 		else if (info.inFormat == RTAUDIO_FLOAT64)
10983 		{
10984 			Float64 *in = (Float64 *)inBuffer;
10985 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
10986 			{
10987 				for (j = 0; j < info.channels; j++)
10988 				{
10989 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] * 2147483647.5 - 0.5);
10990 				}
10991 				in += info.inJump;
10992 				out += info.outJump;
10993 			}
10994 		}
10995 	}
10996 	else if (info.outFormat == RTAUDIO_SINT24)
10997 	{
10998 		Int24 *out = (Int24 *)outBuffer;
10999 		if (info.inFormat == RTAUDIO_SINT8)
11000 		{
11001 			signed char *in = (signed char *)inBuffer;
11002 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11003 			{
11004 				for (j = 0; j < info.channels; j++)
11005 				{
11006 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] << 16);
11007 					//out[info.outOffset[j]] <<= 16;
11008 				}
11009 				in += info.inJump;
11010 				out += info.outJump;
11011 			}
11012 		}
11013 		else if (info.inFormat == RTAUDIO_SINT16)
11014 		{
11015 			Int16 *in = (Int16 *)inBuffer;
11016 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11017 			{
11018 				for (j = 0; j < info.channels; j++)
11019 				{
11020 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] << 8);
11021 					//out[info.outOffset[j]] <<= 8;
11022 				}
11023 				in += info.inJump;
11024 				out += info.outJump;
11025 			}
11026 		}
11027 		else if (info.inFormat == RTAUDIO_SINT24)
11028 		{
11029 			// Channel compensation and/or (de)interleaving only.
11030 			Int24 *in = (Int24 *)inBuffer;
11031 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11032 			{
11033 				for (j = 0; j < info.channels; j++)
11034 				{
11035 					out[info.outOffset[j]] = in[info.inOffset[j]];
11036 				}
11037 				in += info.inJump;
11038 				out += info.outJump;
11039 			}
11040 		}
11041 		else if (info.inFormat == RTAUDIO_SINT32)
11042 		{
11043 			Int32 *in = (Int32 *)inBuffer;
11044 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11045 			{
11046 				for (j = 0; j < info.channels; j++)
11047 				{
11048 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] >> 8);
11049 					//out[info.outOffset[j]] >>= 8;
11050 				}
11051 				in += info.inJump;
11052 				out += info.outJump;
11053 			}
11054 		}
11055 		else if (info.inFormat == RTAUDIO_FLOAT32)
11056 		{
11057 			Float32 *in = (Float32 *)inBuffer;
11058 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11059 			{
11060 				for (j = 0; j < info.channels; j++)
11061 				{
11062 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] * 8388607.5 - 0.5);
11063 				}
11064 				in += info.inJump;
11065 				out += info.outJump;
11066 			}
11067 		}
11068 		else if (info.inFormat == RTAUDIO_FLOAT64)
11069 		{
11070 			Float64 *in = (Float64 *)inBuffer;
11071 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11072 			{
11073 				for (j = 0; j < info.channels; j++)
11074 				{
11075 					out[info.outOffset[j]] = (Int32)(in[info.inOffset[j]] * 8388607.5 - 0.5);
11076 				}
11077 				in += info.inJump;
11078 				out += info.outJump;
11079 			}
11080 		}
11081 	}
11082 	else if (info.outFormat == RTAUDIO_SINT16)
11083 	{
11084 		Int16 *out = (Int16 *)outBuffer;
11085 		if (info.inFormat == RTAUDIO_SINT8)
11086 		{
11087 			signed char *in = (signed char *)inBuffer;
11088 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11089 			{
11090 				for (j = 0; j < info.channels; j++)
11091 				{
11092 					out[info.outOffset[j]] = (Int16)in[info.inOffset[j]];
11093 					out[info.outOffset[j]] <<= 8;
11094 				}
11095 				in += info.inJump;
11096 				out += info.outJump;
11097 			}
11098 		}
11099 		else if (info.inFormat == RTAUDIO_SINT16)
11100 		{
11101 			// Channel compensation and/or (de)interleaving only.
11102 			Int16 *in = (Int16 *)inBuffer;
11103 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11104 			{
11105 				for (j = 0; j < info.channels; j++)
11106 				{
11107 					out[info.outOffset[j]] = in[info.inOffset[j]];
11108 				}
11109 				in += info.inJump;
11110 				out += info.outJump;
11111 			}
11112 		}
11113 		else if (info.inFormat == RTAUDIO_SINT24)
11114 		{
11115 			Int24 *in = (Int24 *)inBuffer;
11116 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11117 			{
11118 				for (j = 0; j < info.channels; j++)
11119 				{
11120 					out[info.outOffset[j]] = (Int16)(in[info.inOffset[j]].asInt() >> 8);
11121 				}
11122 				in += info.inJump;
11123 				out += info.outJump;
11124 			}
11125 		}
11126 		else if (info.inFormat == RTAUDIO_SINT32)
11127 		{
11128 			Int32 *in = (Int32 *)inBuffer;
11129 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11130 			{
11131 				for (j = 0; j < info.channels; j++)
11132 				{
11133 					out[info.outOffset[j]] = (Int16)((in[info.inOffset[j]] >> 16) & 0x0000ffff);
11134 				}
11135 				in += info.inJump;
11136 				out += info.outJump;
11137 			}
11138 		}
11139 		else if (info.inFormat == RTAUDIO_FLOAT32)
11140 		{
11141 			Float32 *in = (Float32 *)inBuffer;
11142 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11143 			{
11144 				for (j = 0; j < info.channels; j++)
11145 				{
11146 					out[info.outOffset[j]] = (Int16)(in[info.inOffset[j]] * 32767.5 - 0.5);
11147 				}
11148 				in += info.inJump;
11149 				out += info.outJump;
11150 			}
11151 		}
11152 		else if (info.inFormat == RTAUDIO_FLOAT64)
11153 		{
11154 			Float64 *in = (Float64 *)inBuffer;
11155 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11156 			{
11157 				for (j = 0; j < info.channels; j++)
11158 				{
11159 					out[info.outOffset[j]] = (Int16)(in[info.inOffset[j]] * 32767.5 - 0.5);
11160 				}
11161 				in += info.inJump;
11162 				out += info.outJump;
11163 			}
11164 		}
11165 	}
11166 	else if (info.outFormat == RTAUDIO_SINT8)
11167 	{
11168 		signed char *out = (signed char *)outBuffer;
11169 		if (info.inFormat == RTAUDIO_SINT8)
11170 		{
11171 			// Channel compensation and/or (de)interleaving only.
11172 			signed char *in = (signed char *)inBuffer;
11173 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11174 			{
11175 				for (j = 0; j < info.channels; j++)
11176 				{
11177 					out[info.outOffset[j]] = in[info.inOffset[j]];
11178 				}
11179 				in += info.inJump;
11180 				out += info.outJump;
11181 			}
11182 		}
11183 		if (info.inFormat == RTAUDIO_SINT16)
11184 		{
11185 			Int16 *in = (Int16 *)inBuffer;
11186 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11187 			{
11188 				for (j = 0; j < info.channels; j++)
11189 				{
11190 					out[info.outOffset[j]] = (signed char)((in[info.inOffset[j]] >> 8) & 0x00ff);
11191 				}
11192 				in += info.inJump;
11193 				out += info.outJump;
11194 			}
11195 		}
11196 		else if (info.inFormat == RTAUDIO_SINT24)
11197 		{
11198 			Int24 *in = (Int24 *)inBuffer;
11199 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11200 			{
11201 				for (j = 0; j < info.channels; j++)
11202 				{
11203 					out[info.outOffset[j]] = (signed char)(in[info.inOffset[j]].asInt() >> 16);
11204 				}
11205 				in += info.inJump;
11206 				out += info.outJump;
11207 			}
11208 		}
11209 		else if (info.inFormat == RTAUDIO_SINT32)
11210 		{
11211 			Int32 *in = (Int32 *)inBuffer;
11212 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11213 			{
11214 				for (j = 0; j < info.channels; j++)
11215 				{
11216 					out[info.outOffset[j]] = (signed char)((in[info.inOffset[j]] >> 24) & 0x000000ff);
11217 				}
11218 				in += info.inJump;
11219 				out += info.outJump;
11220 			}
11221 		}
11222 		else if (info.inFormat == RTAUDIO_FLOAT32)
11223 		{
11224 			Float32 *in = (Float32 *)inBuffer;
11225 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11226 			{
11227 				for (j = 0; j < info.channels; j++)
11228 				{
11229 					out[info.outOffset[j]] = (signed char)(in[info.inOffset[j]] * 127.5 - 0.5);
11230 				}
11231 				in += info.inJump;
11232 				out += info.outJump;
11233 			}
11234 		}
11235 		else if (info.inFormat == RTAUDIO_FLOAT64)
11236 		{
11237 			Float64 *in = (Float64 *)inBuffer;
11238 			for (unsigned int i = 0; i < stream_.bufferSize; i++)
11239 			{
11240 				for (j = 0; j < info.channels; j++)
11241 				{
11242 					out[info.outOffset[j]] = (signed char)(in[info.inOffset[j]] * 127.5 - 0.5);
11243 				}
11244 				in += info.inJump;
11245 				out += info.outJump;
11246 			}
11247 		}
11248 	}
11249 }
11250 
11251 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
11252 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
11253 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
11254 
byteSwapBuffer(char * buffer,unsigned int samples,RtAudioFormat format)11255 void RtApi ::byteSwapBuffer(char *buffer, unsigned int samples, RtAudioFormat format)
11256 {
11257 	char val;
11258 	char *ptr;
11259 
11260 	ptr = buffer;
11261 	if (format == RTAUDIO_SINT16)
11262 	{
11263 		for (unsigned int i = 0; i < samples; i++)
11264 		{
11265 			// Swap 1st and 2nd bytes.
11266 			val = *(ptr);
11267 			*(ptr) = *(ptr + 1);
11268 			*(ptr + 1) = val;
11269 
11270 			// Increment 2 bytes.
11271 			ptr += 2;
11272 		}
11273 	}
11274 	else if (format == RTAUDIO_SINT32 ||
11275 			 format == RTAUDIO_FLOAT32)
11276 	{
11277 		for (unsigned int i = 0; i < samples; i++)
11278 		{
11279 			// Swap 1st and 4th bytes.
11280 			val = *(ptr);
11281 			*(ptr) = *(ptr + 3);
11282 			*(ptr + 3) = val;
11283 
11284 			// Swap 2nd and 3rd bytes.
11285 			ptr += 1;
11286 			val = *(ptr);
11287 			*(ptr) = *(ptr + 1);
11288 			*(ptr + 1) = val;
11289 
11290 			// Increment 3 more bytes.
11291 			ptr += 3;
11292 		}
11293 	}
11294 	else if (format == RTAUDIO_SINT24)
11295 	{
11296 		for (unsigned int i = 0; i < samples; i++)
11297 		{
11298 			// Swap 1st and 3rd bytes.
11299 			val = *(ptr);
11300 			*(ptr) = *(ptr + 2);
11301 			*(ptr + 2) = val;
11302 
11303 			// Increment 2 more bytes.
11304 			ptr += 2;
11305 		}
11306 	}
11307 	else if (format == RTAUDIO_FLOAT64)
11308 	{
11309 		for (unsigned int i = 0; i < samples; i++)
11310 		{
11311 			// Swap 1st and 8th bytes
11312 			val = *(ptr);
11313 			*(ptr) = *(ptr + 7);
11314 			*(ptr + 7) = val;
11315 
11316 			// Swap 2nd and 7th bytes
11317 			ptr += 1;
11318 			val = *(ptr);
11319 			*(ptr) = *(ptr + 5);
11320 			*(ptr + 5) = val;
11321 
11322 			// Swap 3rd and 6th bytes
11323 			ptr += 1;
11324 			val = *(ptr);
11325 			*(ptr) = *(ptr + 3);
11326 			*(ptr + 3) = val;
11327 
11328 			// Swap 4th and 5th bytes
11329 			ptr += 1;
11330 			val = *(ptr);
11331 			*(ptr) = *(ptr + 1);
11332 			*(ptr + 1) = val;
11333 
11334 			// Increment 5 more bytes.
11335 			ptr += 5;
11336 		}
11337 	}
11338 }
11339 
11340 // Indentation settings for Vim and Emacs
11341 //
11342 // Local Variables:
11343 // c-basic-offset: 2
11344 // indent-tabs-mode: nil
11345 // End:
11346 //
11347 // vim: et sts=2 sw=2
11348