comparison DPF-Prymula-audioplugins/dpf/distrho/src/jackbridge/rtaudio/RtAudio.cpp @ 3:84e66ea83026

DPF-Prymula-audioplugins-0.231015-2
author prymula <prymula76@outlook.com>
date Mon, 16 Oct 2023 21:53:34 +0200
parents
children
comparison
equal deleted inserted replaced
2:cf2cb71d31dd 3:84e66ea83026
1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
15
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
23
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
26
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
31
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 */
40 /************************************************************************/
41
42 // RtAudio: Version 5.1.0
43
44 #include "RtAudio.h"
45 #include <iostream>
46 #include <cstdlib>
47 #include <cstring>
48 #include <climits>
49 #include <cmath>
50 #include <algorithm>
51
// Static variable definitions.
// Table of standard sample rates probed by the API back-ends.
// NOTE: MAX_SAMPLE_RATES must equal the number of entries in
// SAMPLE_RATES below (currently 14) — keep them in sync.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
58
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows: critical-section based mutex wrappers.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

#include "tchar.h"

// Convert a narrow or wide character pointer to a std::string.
// Windows APIs may report device names as either char or wchar_t.
template<typename T> inline
std::string convertCharPointerToStdString(const T *text);

template<> inline
std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

template<> inline
std::string convertCharPointerToStdString(const wchar_t *text)
{
  // Query the required UTF-8 buffer size (includes the terminating NUL).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  // Fix: WideCharToMultiByte() returns 0 on failure. The original code
  // then constructed std::string( length-1, '\0' ), i.e. a string of
  // size_t(-1) characters — a huge allocation / undefined behavior.
  // Return an empty name on conversion failure instead.
  if ( length <= 0 ) return std::string();
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
// No supported back-end compiled in: dummy definitions.
// NOTE(review): MUTEX_LOCK/MUTEX_UNLOCK are intentionally left
// undefined here — presumably the dummy build never uses them; confirm.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
95
96 // *************************************************** //
97 //
98 // RtAudio definitions.
99 //
100 // *************************************************** //
101
102 std::string RtAudio :: getVersion( void )
103 {
104 return RTAUDIO_VERSION;
105 }
106
107 // Define API names and display names.
108 // Must be in same order as API enum.
// Define API names and display names.
// Must be in same order as API enum.
extern "C" {
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
// Number of rows in the table above (compared against RtAudio::NUM_APIS
// by the StaticAssertions check further below).
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

// The order here will control the order of RtAudio's API search in
// the constructor.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
  RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
  RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
  RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
  RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
#endif
  RtAudio::UNSPECIFIED,
};
// The trailing UNSPECIFIED sentinel is excluded from the count (-1),
// and also guarantees the array is never empty.
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
160
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// (Pre-C++11 emulation of static_assert: only the <true> specialization
// has a public constructor, so instantiating StaticAssert<false>
// fails to compile.)
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
// Never instantiated at runtime — exists only so the expression below
// must compile.
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
168
169 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
170 {
171 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
172 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
173 }
174
175 std::string RtAudio :: getApiName( RtAudio::Api api )
176 {
177 if (api < 0 || api >= RtAudio::NUM_APIS)
178 return "";
179 return rtaudio_api_names[api][0];
180 }
181
182 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
183 {
184 if (api < 0 || api >= RtAudio::NUM_APIS)
185 return "Unknown";
186 return rtaudio_api_names[api][1];
187 }
188
189 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
190 {
191 unsigned int i=0;
192 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
193 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
194 return rtaudio_compiled_apis[i];
195 return RtAudio::UNSPECIFIED;
196 }
197
// Replace the current API instance (if any) with a new instance of the
// requested API. If the requested API was not compiled into this
// build, rtapi_ is left null and the caller decides how to proceed.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  // Drop any previously selected back-end first.
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

  // Exactly one of the branches below can match; each is compiled in
  // only when the corresponding back-end was enabled at build time.
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
241
// Construct an RtAudio instance. If 'api' names a specific back-end,
// try that first; otherwise (or on failure) probe the compiled APIs in
// preference order and keep the first one that reports a device.
// Throws RtAudioError if no back-end at all could be instantiated.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  // Note: if no API had devices, rtapi_ still holds the last API tried
  // (possibly the dummy), so this return normally succeeds.
  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
274
275 RtAudio :: ~RtAudio()
276 {
277 if ( rtapi_ )
278 delete rtapi_;
279 }
280
// Thin forwarder: all argument validation and device setup happens in
// the selected API instance (see RtApi::openStream below).
// NOTE: rtapi_ is non-null here because the constructor throws when no
// back-end could be instantiated.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
{
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
}
293
294 // *************************************************** //
295 //
296 // Public RtApi definitions (see end of file for
297 // private or protected utility functions).
298 //
299 // *************************************************** //
300
// Initialize the base stream bookkeeping: no stream open, no user
// buffers, and a ready-to-use mutex guarding the stream_ structure.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;  // output-side user buffer
  stream_.userBuffer[1] = 0;  // input-side user buffer
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
312
// Release the stream mutex created in the constructor. API subclasses
// are responsible for closing any open stream before this runs.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
317
// Validate all stream parameters, then delegate device configuration
// to the subclass probeDeviceOpen() for the output and/or input sides.
// On success the stream is left in STREAM_STOPPED state; on any
// validation failure an INVALID_USE error is raised and the function
// returns without opening anything.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  // A non-NULL parameter structure must request at least one channel.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // At least one direction must be requested.
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Check requested device indices against the device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callback data for the API-specific callback thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual number of buffers chosen by the back-end.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
410
411 unsigned int RtApi :: getDefaultInputDevice( void )
412 {
413 // Should be implemented in subclasses if possible.
414 return 0;
415 }
416
417 unsigned int RtApi :: getDefaultOutputDevice( void )
418 {
419 // Should be implemented in subclasses if possible.
420 return 0;
421 }
422
423 void RtApi :: closeStream( void )
424 {
425 // MUST be implemented in subclasses!
426 return;
427 }
428
// Default device-open probe. Concrete API subclasses MUST override
// this to configure the device and fill in stream_. Returning FAILURE
// here guarantees openStream() errors out if a subclass forgot to.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
437
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  // Advance the stream clock by one buffer's duration in seconds.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember when this tick happened so getStreamTime() can
  // interpolate between callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
450
451 long RtApi :: getStreamLatency( void )
452 {
453 verifyStream();
454
455 long totalLatency = 0;
456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
457 totalLatency = stream_.latency[0];
458 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
459 totalLatency += stream_.latency[1];
460
461 return totalLatency;
462 }
463
// Return the number of seconds of processed audio since the stream
// started. When gettimeofday() is available and the stream is running,
// the buffer-granular count is refined by the wall-clock time elapsed
// since the last callback tick.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Only interpolate while running; otherwise (stopped, or no tick has
  // happened yet) report the raw accumulated value.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
486
// Set the stream time to a user-supplied value (in seconds).
// Negative values leave the stream time unchanged.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // NOTE(review): the tick timestamp is refreshed even when 'time' was
  // negative and streamTime was left unchanged — confirm intended.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
497
498 unsigned int RtApi :: getStreamSampleRate( void )
499 {
500 verifyStream();
501
502 return stream_.sampleRate;
503 }
504
505
506 // *************************************************** //
507 //
508 // OS/API-specific methods.
509 //
510 // *************************************************** //
511
512 #if defined(__MACOSX_CORE__)
513
514 #include <unistd.h>
515
516 // The OS X CoreAudio API is designed to use a separate callback
517 // procedure for each of its audio devices. A single RtAudio duplex
518 // stream using two different devices is supported here, though it
519 // cannot be guaranteed to always behave correctly because we cannot
520 // synchronize these two callbacks.
521 //
522 // A property listener is installed for over/underrun information.
523 // However, no functionality is currently provided to allow property
524 // listeners to trigger user handlers because it is unclear what could
525 // be done if a critical stream parameter (buffer size, sample rate,
526 // device disconnect) notification arrived. The listeners entail
527 // quite a bit of extra code and most likely, a user program wouldn't
528 // be prepared for the result anyway. However, we do provide a flag
529 // to the client callback function to inform of an over/underrun.
530
// A structure to hold various information related to the CoreAudio API
// implementation.
struct CoreHandle {
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // over/underrun flags set by xrunListener: [0]=output, [1]=input
  char *deviceBuffer; // intermediate device-format buffer
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
549
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  // NOTE(review): theRunLoop is deliberately NULL — presumably to tell
  // CoreAudio to use its own notification thread rather than the main
  // run loop; confirm against kAudioHardwarePropertyRunLoop docs.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
568
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
576
// Device count = byte size of the kAudioHardwarePropertyDevices array
// divided by the size of one AudioDeviceID. Returns 0 (with a WARNING)
// on query failure.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
591
// Map the system default input AudioDeviceID back to RtAudio's device
// index (its position in the kAudioHardwarePropertyDevices list).
// Returns 0 on any failure or when at most one device exists.
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // zero or one device: index 0 either way

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list to locate the default device's index.
  // NOTE: variable-length array — a compiler extension in C++.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
624
// Map the system default output AudioDeviceID back to RtAudio's device
// index (its position in the kAudioHardwarePropertyDevices list).
// Returns 0 on any failure or when at most one device exists.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // zero or one device: index 0 either way

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list to locate the default device's index.
  // NOTE: variable-length array — a compiler extension in C++.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
657
// Probe capability information (name, channel counts, supported sample
// rates, native format) for the device at index 'device'. On any
// failure info.probed remains false and a WARNING is raised.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Translate the RtAudio index into a CoreAudio AudioDeviceID.
  // NOTE: variable-length array — a compiler extension in C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name, formatted as "<manufacturer>: <device name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Buffer sized at 3 bytes per UTF-16 code unit plus a NUL terminator.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: total channels across all streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information: total channels across all streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // NOTE: variable-length array — a compiler extension in C++.
  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      // Discrete rate.
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Preferred rate: the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      // Continuous range: intersect with the running min/max bounds.
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // Expand a continuous range into the standard-rate table entries it covers.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
898
899 static OSStatus callbackHandler( AudioDeviceID inDevice,
900 const AudioTimeStamp* /*inNow*/,
901 const AudioBufferList* inInputData,
902 const AudioTimeStamp* /*inInputTime*/,
903 AudioBufferList* outOutputData,
904 const AudioTimeStamp* /*inOutputTime*/,
905 void* infoPointer )
906 {
907 CallbackInfo *info = (CallbackInfo *) infoPointer;
908
909 RtApiCore *object = (RtApiCore *) info->object;
910 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
911 return kAudioHardwareUnspecifiedError;
912 else
913 return kAudioHardwareNoError;
914 }
915
916 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
917 UInt32 nAddresses,
918 const AudioObjectPropertyAddress properties[],
919 void* handlePointer )
920 {
921 CoreHandle *handle = (CoreHandle *) handlePointer;
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
924 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
925 handle->xrun[1] = true;
926 else
927 handle->xrun[0] = true;
928 }
929 }
930
931 return kAudioHardwareNoError;
932 }
933
934 static OSStatus rateListener( AudioObjectID inDevice,
935 UInt32 /*nAddresses*/,
936 const AudioObjectPropertyAddress /*properties*/[],
937 void* ratePointer )
938 {
939 Float64 *rate = (Float64 *) ratePointer;
940 UInt32 dataSize = sizeof( Float64 );
941 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
942 kAudioObjectPropertyScopeGlobal,
943 kAudioObjectPropertyElementMaster };
944 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
945 return kAudioHardwareNoError;
946 }
947
948 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
949 unsigned int firstChannel, unsigned int sampleRate,
950 RtAudioFormat format, unsigned int *bufferSize,
951 RtAudio::StreamOptions *options )
952 {
953 // Get device ID
954 unsigned int nDevices = getDeviceCount();
955 if ( nDevices == 0 ) {
956 // This should not happen because a check is made before this function is called.
957 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
958 return FAILURE;
959 }
960
961 if ( device >= nDevices ) {
962 // This should not happen because a check is made before this function is called.
963 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
964 return FAILURE;
965 }
966
967 AudioDeviceID deviceList[ nDevices ];
968 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
969 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
970 kAudioObjectPropertyScopeGlobal,
971 kAudioObjectPropertyElementMaster };
972 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
973 0, NULL, &dataSize, (void *) &deviceList );
974 if ( result != noErr ) {
975 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
976 return FAILURE;
977 }
978
979 AudioDeviceID id = deviceList[ device ];
980
981 // Setup for stream mode.
982 bool isInput = false;
983 if ( mode == INPUT ) {
984 isInput = true;
985 property.mScope = kAudioDevicePropertyScopeInput;
986 }
987 else
988 property.mScope = kAudioDevicePropertyScopeOutput;
989
990 // Get the stream "configuration".
991 AudioBufferList *bufferList = nil;
992 dataSize = 0;
993 property.mSelector = kAudioDevicePropertyStreamConfiguration;
994 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
995 if ( result != noErr || dataSize == 0 ) {
996 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
997 errorText_ = errorStream_.str();
998 return FAILURE;
999 }
1000
1001 // Allocate the AudioBufferList.
1002 bufferList = (AudioBufferList *) malloc( dataSize );
1003 if ( bufferList == NULL ) {
1004 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1005 return FAILURE;
1006 }
1007
1008 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1009 if (result != noErr || dataSize == 0) {
1010 free( bufferList );
1011 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1012 errorText_ = errorStream_.str();
1013 return FAILURE;
1014 }
1015
1016 // Search for one or more streams that contain the desired number of
1017 // channels. CoreAudio devices can have an arbitrary number of
1018 // streams and each stream can have an arbitrary number of channels.
1019 // For each stream, a single buffer of interleaved samples is
1020 // provided. RtAudio prefers the use of one stream of interleaved
1021 // data or multiple consecutive single-channel streams. However, we
1022 // now support multiple consecutive multi-channel streams of
1023 // interleaved data as well.
1024 UInt32 iStream, offsetCounter = firstChannel;
1025 UInt32 nStreams = bufferList->mNumberBuffers;
1026 bool monoMode = false;
1027 bool foundStream = false;
1028
1029 // First check that the device supports the requested number of
1030 // channels.
1031 UInt32 deviceChannels = 0;
1032 for ( iStream=0; iStream<nStreams; iStream++ )
1033 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1034
1035 if ( deviceChannels < ( channels + firstChannel ) ) {
1036 free( bufferList );
1037 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1038 errorText_ = errorStream_.str();
1039 return FAILURE;
1040 }
1041
1042 // Look for a single stream meeting our needs.
1043 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1044 for ( iStream=0; iStream<nStreams; iStream++ ) {
1045 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1046 if ( streamChannels >= channels + offsetCounter ) {
1047 firstStream = iStream;
1048 channelOffset = offsetCounter;
1049 foundStream = true;
1050 break;
1051 }
1052 if ( streamChannels > offsetCounter ) break;
1053 offsetCounter -= streamChannels;
1054 }
1055
1056 // If we didn't find a single stream above, then we should be able
1057 // to meet the channel specification with multiple streams.
1058 if ( foundStream == false ) {
1059 monoMode = true;
1060 offsetCounter = firstChannel;
1061 for ( iStream=0; iStream<nStreams; iStream++ ) {
1062 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1063 if ( streamChannels > offsetCounter ) break;
1064 offsetCounter -= streamChannels;
1065 }
1066
1067 firstStream = iStream;
1068 channelOffset = offsetCounter;
1069 Int32 channelCounter = channels + offsetCounter - streamChannels;
1070
1071 if ( streamChannels > 1 ) monoMode = false;
1072 while ( channelCounter > 0 ) {
1073 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1074 if ( streamChannels > 1 ) monoMode = false;
1075 channelCounter -= streamChannels;
1076 streamCount++;
1077 }
1078 }
1079
1080 free( bufferList );
1081
1082 // Determine the buffer size.
1083 AudioValueRange bufferRange;
1084 dataSize = sizeof( AudioValueRange );
1085 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1087
1088 if ( result != noErr ) {
1089 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1090 errorText_ = errorStream_.str();
1091 return FAILURE;
1092 }
1093
1094 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1095 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1096 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1097
1098 // Set the buffer size. For multiple streams, I'm assuming we only
1099 // need to make this setting for the master channel.
1100 UInt32 theSize = (UInt32) *bufferSize;
1101 dataSize = sizeof( UInt32 );
1102 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1104
1105 if ( result != noErr ) {
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1107 errorText_ = errorStream_.str();
1108 return FAILURE;
1109 }
1110
1111 // If attempting to setup a duplex stream, the bufferSize parameter
1112 // MUST be the same in both directions!
1113 *bufferSize = theSize;
1114 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1115 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1116 errorText_ = errorStream_.str();
1117 return FAILURE;
1118 }
1119
1120 stream_.bufferSize = *bufferSize;
1121 stream_.nBuffers = 1;
1122
1123 // Try to set "hog" mode ... it's not clear to me this is working.
1124 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1125 pid_t hog_pid;
1126 dataSize = sizeof( hog_pid );
1127 property.mSelector = kAudioDevicePropertyHogMode;
1128 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1129 if ( result != noErr ) {
1130 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1131 errorText_ = errorStream_.str();
1132 return FAILURE;
1133 }
1134
1135 if ( hog_pid != getpid() ) {
1136 hog_pid = getpid();
1137 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1138 if ( result != noErr ) {
1139 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1140 errorText_ = errorStream_.str();
1141 return FAILURE;
1142 }
1143 }
1144 }
1145
1146 // Check and if necessary, change the sample rate for the device.
1147 Float64 nominalRate;
1148 dataSize = sizeof( Float64 );
1149 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1150 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1151 if ( result != noErr ) {
1152 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1153 errorText_ = errorStream_.str();
1154 return FAILURE;
1155 }
1156
1157 // Only change the sample rate if off by more than 1 Hz.
1158 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1159
1160 // Set a property listener for the sample rate change
1161 Float64 reportedRate = 0.0;
1162 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1163 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1164 if ( result != noErr ) {
1165 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1166 errorText_ = errorStream_.str();
1167 return FAILURE;
1168 }
1169
1170 nominalRate = (Float64) sampleRate;
1171 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1172 if ( result != noErr ) {
1173 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1174 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1175 errorText_ = errorStream_.str();
1176 return FAILURE;
1177 }
1178
1179 // Now wait until the reported nominal rate is what we just set.
1180 UInt32 microCounter = 0;
1181 while ( reportedRate != nominalRate ) {
1182 microCounter += 5000;
1183 if ( microCounter > 5000000 ) break;
1184 usleep( 5000 );
1185 }
1186
1187 // Remove the property listener.
1188 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1189
1190 if ( microCounter > 5000000 ) {
1191 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1192 errorText_ = errorStream_.str();
1193 return FAILURE;
1194 }
1195 }
1196
1197 // Now set the stream format for all streams. Also, check the
1198 // physical format of the device and change that if necessary.
1199 AudioStreamBasicDescription description;
1200 dataSize = sizeof( AudioStreamBasicDescription );
1201 property.mSelector = kAudioStreamPropertyVirtualFormat;
1202 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1203 if ( result != noErr ) {
1204 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1205 errorText_ = errorStream_.str();
1206 return FAILURE;
1207 }
1208
1209 // Set the sample rate and data format id. However, only make the
1210 // change if the sample rate is not within 1.0 of the desired
1211 // rate and the format is not linear pcm.
1212 bool updateFormat = false;
1213 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1214 description.mSampleRate = (Float64) sampleRate;
1215 updateFormat = true;
1216 }
1217
1218 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1219 description.mFormatID = kAudioFormatLinearPCM;
1220 updateFormat = true;
1221 }
1222
1223 if ( updateFormat ) {
1224 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1225 if ( result != noErr ) {
1226 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1227 errorText_ = errorStream_.str();
1228 return FAILURE;
1229 }
1230 }
1231
1232 // Now check the physical format.
1233 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1235 if ( result != noErr ) {
1236 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1237 errorText_ = errorStream_.str();
1238 return FAILURE;
1239 }
1240
1241 //std::cout << "Current physical stream format:" << std::endl;
1242 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1243 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1244 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1245 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1246
1247 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1248 description.mFormatID = kAudioFormatLinearPCM;
1249 //description.mSampleRate = (Float64) sampleRate;
1250 AudioStreamBasicDescription testDescription = description;
1251 UInt32 formatFlags;
1252
1253 // We'll try higher bit rates first and then work our way down.
1254 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1255 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1260 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1262 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1263 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1264 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1265 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1267
1268 bool setPhysicalFormat = false;
1269 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1270 testDescription = description;
1271 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1272 testDescription.mFormatFlags = physicalFormats[i].second;
1273 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1274 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1275 else
1276 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1277 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1278 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1279 if ( result == noErr ) {
1280 setPhysicalFormat = true;
1281 //std::cout << "Updated physical stream format:" << std::endl;
1282 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1283 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1284 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1285 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1286 break;
1287 }
1288 }
1289
1290 if ( !setPhysicalFormat ) {
1291 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1292 errorText_ = errorStream_.str();
1293 return FAILURE;
1294 }
1295 } // done setting virtual/physical formats.
1296
1297 // Get the stream / device latency.
1298 UInt32 latency;
1299 dataSize = sizeof( UInt32 );
1300 property.mSelector = kAudioDevicePropertyLatency;
1301 if ( AudioObjectHasProperty( id, &property ) == true ) {
1302 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1303 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1304 else {
1305 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1306 errorText_ = errorStream_.str();
1307 error( RtAudioError::WARNING );
1308 }
1309 }
1310
1311 // Byte-swapping: According to AudioHardware.h, the stream data will
1312 // always be presented in native-endian format, so we should never
1313 // need to byte swap.
1314 stream_.doByteSwap[mode] = false;
1315
1316 // From the CoreAudio documentation, PCM data must be supplied as
1317 // 32-bit floats.
1318 stream_.userFormat = format;
1319 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1320
1321 if ( streamCount == 1 )
1322 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1323 else // multiple streams
1324 stream_.nDeviceChannels[mode] = channels;
1325 stream_.nUserChannels[mode] = channels;
1326 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1327 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1328 else stream_.userInterleaved = true;
1329 stream_.deviceInterleaved[mode] = true;
1330 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1331
1332 // Set flags for buffer conversion.
1333 stream_.doConvertBuffer[mode] = false;
1334 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1335 stream_.doConvertBuffer[mode] = true;
1336 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1337 stream_.doConvertBuffer[mode] = true;
1338 if ( streamCount == 1 ) {
1339 if ( stream_.nUserChannels[mode] > 1 &&
1340 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1341 stream_.doConvertBuffer[mode] = true;
1342 }
1343 else if ( monoMode && stream_.userInterleaved )
1344 stream_.doConvertBuffer[mode] = true;
1345
1346 // Allocate our CoreHandle structure for the stream.
1347 CoreHandle *handle = 0;
1348 if ( stream_.apiHandle == 0 ) {
1349 try {
1350 handle = new CoreHandle;
1351 }
1352 catch ( std::bad_alloc& ) {
1353 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1354 goto error;
1355 }
1356
1357 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1358 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1359 goto error;
1360 }
1361 stream_.apiHandle = (void *) handle;
1362 }
1363 else
1364 handle = (CoreHandle *) stream_.apiHandle;
1365 handle->iStream[mode] = firstStream;
1366 handle->nStreams[mode] = streamCount;
1367 handle->id[mode] = id;
1368
1369 // Allocate necessary internal buffers.
1370 unsigned long bufferBytes;
1371 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1372 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1373 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1374 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1375 if ( stream_.userBuffer[mode] == NULL ) {
1376 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1377 goto error;
1378 }
1379
1380 // If possible, we will make use of the CoreAudio stream buffers as
1381 // "device buffers". However, we can't do this if using multiple
1382 // streams.
1383 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1384
1385 bool makeBuffer = true;
1386 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1387 if ( mode == INPUT ) {
1388 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1389 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1390 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1391 }
1392 }
1393
1394 if ( makeBuffer ) {
1395 bufferBytes *= *bufferSize;
1396 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1397 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1398 if ( stream_.deviceBuffer == NULL ) {
1399 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1400 goto error;
1401 }
1402 }
1403 }
1404
1405 stream_.sampleRate = sampleRate;
1406 stream_.device[mode] = device;
1407 stream_.state = STREAM_STOPPED;
1408 stream_.callbackInfo.object = (void *) this;
1409
1410 // Setup the buffer conversion information structure.
1411 if ( stream_.doConvertBuffer[mode] ) {
1412 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1413 else setConvertInfo( mode, channelOffset );
1414 }
1415
1416 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1417 // Only one callback procedure per device.
1418 stream_.mode = DUPLEX;
1419 else {
1420 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1421 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1422 #else
1423 // deprecated in favor of AudioDeviceCreateIOProcID()
1424 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1425 #endif
1426 if ( result != noErr ) {
1427 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1428 errorText_ = errorStream_.str();
1429 goto error;
1430 }
1431 if ( stream_.mode == OUTPUT && mode == INPUT )
1432 stream_.mode = DUPLEX;
1433 else
1434 stream_.mode = mode;
1435 }
1436
1437 // Setup the device property listener for over/underload.
1438 property.mSelector = kAudioDeviceProcessorOverload;
1439 property.mScope = kAudioObjectPropertyScopeGlobal;
1440 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1441
1442 return SUCCESS;
1443
1444 error:
1445 if ( handle ) {
1446 pthread_cond_destroy( &handle->condition );
1447 delete handle;
1448 stream_.apiHandle = 0;
1449 }
1450
1451 for ( int i=0; i<2; i++ ) {
1452 if ( stream_.userBuffer[i] ) {
1453 free( stream_.userBuffer[i] );
1454 stream_.userBuffer[i] = 0;
1455 }
1456 }
1457
1458 if ( stream_.deviceBuffer ) {
1459 free( stream_.deviceBuffer );
1460 stream_.deviceBuffer = 0;
1461 }
1462
1463 stream_.state = STREAM_CLOSED;
1464 return FAILURE;
1465 }
1466
1467 void RtApiCore :: closeStream( void )
1468 {
1469 if ( stream_.state == STREAM_CLOSED ) {
1470 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1471 error( RtAudioError::WARNING );
1472 return;
1473 }
1474
1475 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1476 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1477 if (handle) {
1478 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1479 kAudioObjectPropertyScopeGlobal,
1480 kAudioObjectPropertyElementMaster };
1481
1482 property.mSelector = kAudioDeviceProcessorOverload;
1483 property.mScope = kAudioObjectPropertyScopeGlobal;
1484 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1485 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1486 error( RtAudioError::WARNING );
1487 }
1488
1489 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1490 if ( stream_.state == STREAM_RUNNING )
1491 AudioDeviceStop( handle->id[0], handle->procId[0] );
1492 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1493 #else // deprecated behaviour
1494 if ( stream_.state == STREAM_RUNNING )
1495 AudioDeviceStop( handle->id[0], callbackHandler );
1496 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1497 #endif
1498 }
1499 }
1500
1501 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502 if (handle) {
1503 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1504 kAudioObjectPropertyScopeGlobal,
1505 kAudioObjectPropertyElementMaster };
1506
1507 property.mSelector = kAudioDeviceProcessorOverload;
1508 property.mScope = kAudioObjectPropertyScopeGlobal;
1509 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1510 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1511 error( RtAudioError::WARNING );
1512 }
1513
1514 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1515 if ( stream_.state == STREAM_RUNNING )
1516 AudioDeviceStop( handle->id[1], handle->procId[1] );
1517 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1518 #else // deprecated behaviour
1519 if ( stream_.state == STREAM_RUNNING )
1520 AudioDeviceStop( handle->id[1], callbackHandler );
1521 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1522 #endif
1523 }
1524 }
1525
1526 for ( int i=0; i<2; i++ ) {
1527 if ( stream_.userBuffer[i] ) {
1528 free( stream_.userBuffer[i] );
1529 stream_.userBuffer[i] = 0;
1530 }
1531 }
1532
1533 if ( stream_.deviceBuffer ) {
1534 free( stream_.deviceBuffer );
1535 stream_.deviceBuffer = 0;
1536 }
1537
1538 // Destroy pthread condition variable.
1539 pthread_cond_destroy( &handle->condition );
1540 delete handle;
1541 stream_.apiHandle = 0;
1542
1543 stream_.mode = UNINITIALIZED;
1544 stream_.state = STREAM_CLOSED;
1545 }
1546
1547 void RtApiCore :: startStream( void )
1548 {
1549 verifyStream();
1550 if ( stream_.state == STREAM_RUNNING ) {
1551 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1552 error( RtAudioError::WARNING );
1553 return;
1554 }
1555
1556 #if defined( HAVE_GETTIMEOFDAY )
1557 gettimeofday( &stream_.lastTickTimestamp, NULL );
1558 #endif
1559
1560 OSStatus result = noErr;
1561 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1562 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1563
1564 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1565 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1566 #else // deprecated behaviour
1567 result = AudioDeviceStart( handle->id[0], callbackHandler );
1568 #endif
1569 if ( result != noErr ) {
1570 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1571 errorText_ = errorStream_.str();
1572 goto unlock;
1573 }
1574 }
1575
1576 if ( stream_.mode == INPUT ||
1577 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1578
1579 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1580 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1581 #else // deprecated behaviour
1582 result = AudioDeviceStart( handle->id[1], callbackHandler );
1583 #endif
1584 if ( result != noErr ) {
1585 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1586 errorText_ = errorStream_.str();
1587 goto unlock;
1588 }
1589 }
1590
1591 handle->drainCounter = 0;
1592 handle->internalDrain = false;
1593 stream_.state = STREAM_RUNNING;
1594
1595 unlock:
1596 if ( result == noErr ) return;
1597 error( RtAudioError::SYSTEM_ERROR );
1598 }
1599
// Stop an open stream, letting any queued output drain first unless a
// drain was already requested (e.g. by abortStream()).  Any CoreAudio
// failure is reported as a SYSTEM_ERROR; stopping an already-stopped
// stream only emits a warning.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain has been requested yet; request
    // one (value 2) and wait for the callback to signal completion.
    // abortStream() presets drainCounter to 2 so this wait is skipped.
    // NOTE(review): pthread_cond_wait() is called here without
    // stream_.mutex being locked, which POSIX leaves undefined and
    // which can miss the signal from the callback — confirm whether
    // this is intentional in this fork.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStop( handle->id[0], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // The input device needs its own stop only when it is a different
  // physical device from the output.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[1], handle->procId[1] );
#else // deprecated behaviour
    result = AudioDeviceStop( handle->id[1], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1650
1651 void RtApiCore :: abortStream( void )
1652 {
1653 verifyStream();
1654 if ( stream_.state == STREAM_STOPPED ) {
1655 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1656 error( RtAudioError::WARNING );
1657 return;
1658 }
1659
1660 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1661 handle->drainCounter = 2;
1662
1663 stopStream();
1664 }
1665
1666 // This function will be called by a spawned thread when the user
1667 // callback function signals that the stream should be stopped or
1668 // aborted. It is better to handle it this way because the
1669 // callbackEvent() function probably should return before the AudioDeviceStop()
1670 // function is called.
1671 static void *coreStopStream( void *ptr )
1672 {
1673 CallbackInfo *info = (CallbackInfo *) ptr;
1674 RtApiCore *object = (RtApiCore *) info->object;
1675
1676 object->stopStream();
1677 pthread_exit( NULL );
1678 }
1679
// Per-buffer CoreAudio IOProc worker. Invoked once per device per cycle
// (twice per cycle in duplex mode with two distinct devices). Runs the
// user callback to produce/consume audio, handles the drain/stop
// handshake with stopStream()/abortStream(), and de/interleaves between
// the user buffers and CoreAudio's per-stream AudioBufferList layout.
// Returns SUCCESS normally; FAILURE only if called on a closed stream.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // drainCounter > 3 means enough silent buffers have been emitted; either
  // spawn a thread to stop the stream (internal request from the user
  // callback) or wake the thread blocked in stopStream() (external request).
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    // Report (and clear) any xrun flags latched by the device listeners.
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    // User callback return codes: 0 = continue, 1 = drain then stop,
    // 2 = abort immediately.
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Inner copy loop: shadows outer `i` intentionally left as-is.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        // Mirror of the output de-interleave logic above, reading from the
        // device's per-stream buffers into the (possibly interleaved)
        // user/device buffer.
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1942
1943 const char* RtApiCore :: getErrorCode( OSStatus code )
1944 {
1945 switch( code ) {
1946
1947 case kAudioHardwareNotRunningError:
1948 return "kAudioHardwareNotRunningError";
1949
1950 case kAudioHardwareUnspecifiedError:
1951 return "kAudioHardwareUnspecifiedError";
1952
1953 case kAudioHardwareUnknownPropertyError:
1954 return "kAudioHardwareUnknownPropertyError";
1955
1956 case kAudioHardwareBadPropertySizeError:
1957 return "kAudioHardwareBadPropertySizeError";
1958
1959 case kAudioHardwareIllegalOperationError:
1960 return "kAudioHardwareIllegalOperationError";
1961
1962 case kAudioHardwareBadObjectError:
1963 return "kAudioHardwareBadObjectError";
1964
1965 case kAudioHardwareBadDeviceError:
1966 return "kAudioHardwareBadDeviceError";
1967
1968 case kAudioHardwareBadStreamError:
1969 return "kAudioHardwareBadStreamError";
1970
1971 case kAudioHardwareUnsupportedOperationError:
1972 return "kAudioHardwareUnsupportedOperationError";
1973
1974 case kAudioDeviceUnsupportedFormatError:
1975 return "kAudioDeviceUnsupportedFormatError";
1976
1977 case kAudioDevicePermissionsError:
1978 return "kAudioDevicePermissionsError";
1979
1980 default:
1981 return "CoreAudio unknown error";
1982 }
1983 }
1984
1985 //******************** End of __MACOSX_CORE__ *********************//
1986 #endif
1987
1988 #if defined(__UNIX_JACK__)
1989
1990 // JACK is a low-latency audio server, originally written for the
1991 // GNU/Linux operating system and now also ported to OS-X. It can
1992 // connect a number of different applications to an audio device, as
1993 // well as allowing them to share audio between themselves.
1994 //
1995 // When using JACK with RtAudio, "devices" refer to JACK clients that
1996 // have ports connected to the server. The JACK server is typically
1997 // started in a terminal as follows:
1998 //
1999 // .jackd -d alsa -d hw:0
2000 //
2001 // or through an interface program such as qjackctl. Many of the
2002 // parameters normally set for a stream are fixed by the JACK server
2003 // and can be specified when the JACK server is started. In
2004 // particular,
2005 //
2006 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2007 //
2008 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2009 // frames, and number of buffers = 4. Once the server is running, it
2010 // is not possible to override these values. If the values are not
2011 // specified in the command-line, the JACK server uses default values.
2012 //
2013 // The JACK server does not have to be running when an instance of
2014 // RtApiJack is created, though the function getDeviceCount() will
2015 // report 0 devices found until JACK has been started. When no
2016 // devices are available (i.e., the JACK server is not running), a
2017 // stream cannot be opened.
2018
2019 #include <jack/jack.h>
2020 #include <unistd.h>
2021 #include <cstdio>
2022
// A structure to hold various information related to the Jack API
// implementation.
struct JackHandle {
  jack_client_t *client;      // Connection to the JACK server, shared by both directions.
  jack_port_t **ports[2];     // Registered ports: [0] = playback (output), [1] = capture (input).
  std::string deviceName[2];  // Target JACK client-name prefix per direction.
  bool xrun[2];               // Latched by jackXrun() when the server reports an xrun.
  pthread_cond_t condition;   // Signaled by callbackEvent() when draining finishes.
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
2037
#if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error callback: installed in non-debug builds to suppress
// the JACK library's internal error reporting to stderr.
static void jackSilentError( const char * /*message*/ )
{
  // Intentionally empty.
}
#endif
2041
// Construct the JACK API instance. Autoconnect defaults to enabled; it is
// disabled later in probeDeviceOpen() via RTAUDIO_JACK_DONT_CONNECT.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2050
// Destructor: make sure any open stream (and its JACK client) is closed.
RtApiJack :: ~RtApiJack()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
2055
2056 unsigned int RtApiJack :: getDeviceCount( void )
2057 {
2058 // See if we can become a jack client.
2059 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2060 jack_status_t *status = NULL;
2061 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2062 if ( client == 0 ) return 0;
2063
2064 const char **ports;
2065 std::string port, previousPort;
2066 unsigned int nChannels = 0, nDevices = 0;
2067 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2068 if ( ports ) {
2069 // Parse the port names up to the first colon (:).
2070 size_t iColon = 0;
2071 do {
2072 port = (char *) ports[ nChannels ];
2073 iColon = port.find(":");
2074 if ( iColon != std::string::npos ) {
2075 port = port.substr( 0, iColon + 1 );
2076 if ( port != previousPort ) {
2077 nDevices++;
2078 previousPort = port;
2079 }
2080 }
2081 } while ( ports[++nChannels] );
2082 free( ports );
2083 }
2084
2085 jack_client_close( client );
2086 return nDevices;
2087 }
2088
// Probe one JACK "device" (a distinct port-name prefix) and fill in an
// RtAudio::DeviceInfo: name, channel counts, sample rate, and default
// flags. info.probed stays false on any failure path (no server, bad
// device index, or no channels found).
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Connect as a throw-away client; never auto-start a server.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          // The device index selects the Nth distinct prefix seen.
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  // JACK runs at a single fixed rate; report only that one.
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2181
2182 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2183 {
2184 CallbackInfo *info = (CallbackInfo *) infoPointer;
2185
2186 RtApiJack *object = (RtApiJack *) info->object;
2187 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2188
2189 return 0;
2190 }
2191
2192 // This function will be called by a spawned thread when the Jack
2193 // server signals that it is shutting down. It is necessary to handle
2194 // it this way because the jackShutdown() function must return before
2195 // the jack_deactivate() function (in closeStream()) will return.
2196 static void *jackCloseStream( void *ptr )
2197 {
2198 CallbackInfo *info = (CallbackInfo *) ptr;
2199 RtApiJack *object = (RtApiJack *) info->object;
2200
2201 object->closeStream();
2202
2203 pthread_exit( NULL );
2204 }
2205 static void jackShutdown( void *infoPointer )
2206 {
2207 CallbackInfo *info = (CallbackInfo *) infoPointer;
2208 RtApiJack *object = (RtApiJack *) info->object;
2209
2210 // Check current stream state. If stopped, then we'll assume this
2211 // was called as a result of a call to RtApiJack::stopStream (the
2212 // deactivation of a client handle causes this function to be called).
2213 // If not, we'll assume the Jack server is shutting down or some
2214 // other problem occurred and we should close the stream.
2215 if ( object->isStreamRunning() == false ) return;
2216
2217 ThreadHandle threadId;
2218 pthread_create( &threadId, NULL, jackCloseStream, info );
2219 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2220 }
2221
2222 static int jackXrun( void *infoPointer )
2223 {
2224 JackHandle *handle = *((JackHandle **) infoPointer);
2225
2226 if ( handle->ports[0] ) handle->xrun[0] = true;
2227 if ( handle->ports[1] ) handle->xrun[1] = true;
2228
2229 return 0;
2230 }
2231
2232 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2233 unsigned int firstChannel, unsigned int sampleRate,
2234 RtAudioFormat format, unsigned int *bufferSize,
2235 RtAudio::StreamOptions *options )
2236 {
2237 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2238
2239 // Look for jack server and try to become a client (only do once per stream).
2240 jack_client_t *client = 0;
2241 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2242 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2243 jack_status_t *status = NULL;
2244 if ( options && !options->streamName.empty() )
2245 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2246 else
2247 client = jack_client_open( "RtApiJack", jackoptions, status );
2248 if ( client == 0 ) {
2249 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2250 error( RtAudioError::WARNING );
2251 return FAILURE;
2252 }
2253 }
2254 else {
2255 // The handle must have been created on an earlier pass.
2256 client = handle->client;
2257 }
2258
2259 const char **ports;
2260 std::string port, previousPort, deviceName;
2261 unsigned int nPorts = 0, nDevices = 0;
2262 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2263 if ( ports ) {
2264 // Parse the port names up to the first colon (:).
2265 size_t iColon = 0;
2266 do {
2267 port = (char *) ports[ nPorts ];
2268 iColon = port.find(":");
2269 if ( iColon != std::string::npos ) {
2270 port = port.substr( 0, iColon );
2271 if ( port != previousPort ) {
2272 if ( nDevices == device ) deviceName = port;
2273 nDevices++;
2274 previousPort = port;
2275 }
2276 }
2277 } while ( ports[++nPorts] );
2278 free( ports );
2279 }
2280
2281 if ( device >= nDevices ) {
2282 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2283 return FAILURE;
2284 }
2285
2286 unsigned long flag = JackPortIsInput;
2287 if ( mode == INPUT ) flag = JackPortIsOutput;
2288
2289 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2290 // Count the available ports containing the client name as device
2291 // channels. Jack "input ports" equal RtAudio output channels.
2292 unsigned int nChannels = 0;
2293 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2294 if ( ports ) {
2295 while ( ports[ nChannels ] ) nChannels++;
2296 free( ports );
2297 }
2298 // Compare the jack ports for specified client to the requested number of channels.
2299 if ( nChannels < (channels + firstChannel) ) {
2300 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2301 errorText_ = errorStream_.str();
2302 return FAILURE;
2303 }
2304 }
2305
2306 // Check the jack server sample rate.
2307 unsigned int jackRate = jack_get_sample_rate( client );
2308 if ( sampleRate != jackRate ) {
2309 jack_client_close( client );
2310 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2311 errorText_ = errorStream_.str();
2312 return FAILURE;
2313 }
2314 stream_.sampleRate = jackRate;
2315
2316 // Get the latency of the JACK port.
2317 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2318 if ( ports[ firstChannel ] ) {
2319 // Added by Ge Wang
2320 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2321 // the range (usually the min and max are equal)
2322 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2323 // get the latency range
2324 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2325 // be optimistic, use the min!
2326 stream_.latency[mode] = latrange.min;
2327 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2328 }
2329 free( ports );
2330
2331 // The jack server always uses 32-bit floating-point data.
2332 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2333 stream_.userFormat = format;
2334
2335 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2336 else stream_.userInterleaved = true;
2337
2338 // Jack always uses non-interleaved buffers.
2339 stream_.deviceInterleaved[mode] = false;
2340
2341 // Jack always provides host byte-ordered data.
2342 stream_.doByteSwap[mode] = false;
2343
2344 // Get the buffer size. The buffer size and number of buffers
2345 // (periods) is set when the jack server is started.
2346 stream_.bufferSize = (int) jack_get_buffer_size( client );
2347 *bufferSize = stream_.bufferSize;
2348
2349 stream_.nDeviceChannels[mode] = channels;
2350 stream_.nUserChannels[mode] = channels;
2351
2352 // Set flags for buffer conversion.
2353 stream_.doConvertBuffer[mode] = false;
2354 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2355 stream_.doConvertBuffer[mode] = true;
2356 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2357 stream_.nUserChannels[mode] > 1 )
2358 stream_.doConvertBuffer[mode] = true;
2359
2360 // Allocate our JackHandle structure for the stream.
2361 if ( handle == 0 ) {
2362 try {
2363 handle = new JackHandle;
2364 }
2365 catch ( std::bad_alloc& ) {
2366 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2367 goto error;
2368 }
2369
2370 if ( pthread_cond_init(&handle->condition, NULL) ) {
2371 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2372 goto error;
2373 }
2374 stream_.apiHandle = (void *) handle;
2375 handle->client = client;
2376 }
2377 handle->deviceName[mode] = deviceName;
2378
2379 // Allocate necessary internal buffers.
2380 unsigned long bufferBytes;
2381 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2382 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2383 if ( stream_.userBuffer[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2385 goto error;
2386 }
2387
2388 if ( stream_.doConvertBuffer[mode] ) {
2389
2390 bool makeBuffer = true;
2391 if ( mode == OUTPUT )
2392 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2393 else { // mode == INPUT
2394 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2397 if ( bufferBytes < bytesOut ) makeBuffer = false;
2398 }
2399 }
2400
2401 if ( makeBuffer ) {
2402 bufferBytes *= *bufferSize;
2403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2405 if ( stream_.deviceBuffer == NULL ) {
2406 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2407 goto error;
2408 }
2409 }
2410 }
2411
2412 // Allocate memory for the Jack ports (channels) identifiers.
2413 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2414 if ( handle->ports[mode] == NULL ) {
2415 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2416 goto error;
2417 }
2418
2419 stream_.device[mode] = device;
2420 stream_.channelOffset[mode] = firstChannel;
2421 stream_.state = STREAM_STOPPED;
2422 stream_.callbackInfo.object = (void *) this;
2423
2424 if ( stream_.mode == OUTPUT && mode == INPUT )
2425 // We had already set up the stream for output.
2426 stream_.mode = DUPLEX;
2427 else {
2428 stream_.mode = mode;
2429 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2430 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2431 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2432 }
2433
2434 // Register our ports.
2435 char label[64];
2436 if ( mode == OUTPUT ) {
2437 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2438 snprintf( label, 64, "outport %d", i );
2439 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2440 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2441 }
2442 }
2443 else {
2444 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2445 snprintf( label, 64, "inport %d", i );
2446 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2447 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2448 }
2449 }
2450
2451 // Setup the buffer conversion information structure. We don't use
2452 // buffers to do channel offsets, so we override that parameter
2453 // here.
2454 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2455
2456 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2457
2458 return SUCCESS;
2459
2460 error:
2461 if ( handle ) {
2462 pthread_cond_destroy( &handle->condition );
2463 jack_client_close( handle->client );
2464
2465 if ( handle->ports[0] ) free( handle->ports[0] );
2466 if ( handle->ports[1] ) free( handle->ports[1] );
2467
2468 delete handle;
2469 stream_.apiHandle = 0;
2470 }
2471
2472 for ( int i=0; i<2; i++ ) {
2473 if ( stream_.userBuffer[i] ) {
2474 free( stream_.userBuffer[i] );
2475 stream_.userBuffer[i] = 0;
2476 }
2477 }
2478
2479 if ( stream_.deviceBuffer ) {
2480 free( stream_.deviceBuffer );
2481 stream_.deviceBuffer = 0;
2482 }
2483
2484 return FAILURE;
2485 }
2486
2487 void RtApiJack :: closeStream( void )
2488 {
2489 if ( stream_.state == STREAM_CLOSED ) {
2490 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2491 error( RtAudioError::WARNING );
2492 return;
2493 }
2494
2495 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2496 if ( handle ) {
2497
2498 if ( stream_.state == STREAM_RUNNING )
2499 jack_deactivate( handle->client );
2500
2501 jack_client_close( handle->client );
2502 }
2503
2504 if ( handle ) {
2505 if ( handle->ports[0] ) free( handle->ports[0] );
2506 if ( handle->ports[1] ) free( handle->ports[1] );
2507 pthread_cond_destroy( &handle->condition );
2508 delete handle;
2509 stream_.apiHandle = 0;
2510 }
2511
2512 for ( int i=0; i<2; i++ ) {
2513 if ( stream_.userBuffer[i] ) {
2514 free( stream_.userBuffer[i] );
2515 stream_.userBuffer[i] = 0;
2516 }
2517 }
2518
2519 if ( stream_.deviceBuffer ) {
2520 free( stream_.deviceBuffer );
2521 stream_.deviceBuffer = 0;
2522 }
2523
2524 stream_.mode = UNINITIALIZED;
2525 stream_.state = STREAM_CLOSED;
2526 }
2527
// Activate the JACK client and (optionally) auto-connect our ports to the
// physical/device ports, then mark the stream running. On any failure,
// `result` is non-zero at the `unlock` label and a SYSTEM_ERROR is raised.
void RtApiJack :: startStream( void )
{
  // Ensure a stream has been opened; warn and bail if already running.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  // Reset the stream-time reference for getStreamTime().
  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    // `result = 1` acts as a failure sentinel until jack_connect() succeeds.
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    // For input we look for ports that *output* audio into ours.
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2605
// Stop the stream, first letting any queued output drain: the process
// callback writes silence once drainCounter is set, then signals the
// condition so we can safely deactivate the JACK client.
void RtApiJack :: stopStream( void )
{
  // Ensure a stream is open; warn and return if already stopped.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // If a drain isn't already in progress, request one (drainCounter = 2
    // makes callbackEvent() output zeros) and wait for the callback to
    // signal completion.
    // NOTE(review): POSIX requires stream_.mutex to be locked by this
    // thread before pthread_cond_wait(); no lock is visibly taken here —
    // confirm against the rest of the file before changing.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  // Detach our process callback from the JACK graph and mark stopped.
  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2627
2628 void RtApiJack :: abortStream( void )
2629 {
2630 verifyStream();
2631 if ( stream_.state == STREAM_STOPPED ) {
2632 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2633 error( RtAudioError::WARNING );
2634 return;
2635 }
2636
2637 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2638 handle->drainCounter = 2;
2639
2640 stopStream();
2641 }
2642
2643 // This function will be called by a spawned thread when the user
2644 // callback function signals that the stream should be stopped or
2645 // aborted. It is necessary to handle it this way because the
2646 // callbackEvent() function must return before the jack_deactivate()
2647 // function will return.
2648 static void *jackStopStream( void *ptr )
2649 {
2650 CallbackInfo *info = (CallbackInfo *) ptr;
2651 RtApiJack *object = (RtApiJack *) info->object;
2652
2653 object->stopStream();
2654 pthread_exit( NULL );
2655 }
2656
2657 bool RtApiJack :: callbackEvent( unsigned long nframes )
2658 {
2659 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2660 if ( stream_.state == STREAM_CLOSED ) {
2661 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2662 error( RtAudioError::WARNING );
2663 return FAILURE;
2664 }
2665 if ( stream_.bufferSize != nframes ) {
2666 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2667 error( RtAudioError::WARNING );
2668 return FAILURE;
2669 }
2670
2671 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2672 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2673
2674 // Check if we were draining the stream and signal is finished.
2675 if ( handle->drainCounter > 3 ) {
2676 ThreadHandle threadId;
2677
2678 stream_.state = STREAM_STOPPING;
2679 if ( handle->internalDrain == true )
2680 pthread_create( &threadId, NULL, jackStopStream, info );
2681 else
2682 pthread_cond_signal( &handle->condition );
2683 return SUCCESS;
2684 }
2685
2686 // Invoke user callback first, to get fresh output data.
2687 if ( handle->drainCounter == 0 ) {
2688 RtAudioCallback callback = (RtAudioCallback) info->callback;
2689 double streamTime = getStreamTime();
2690 RtAudioStreamStatus status = 0;
2691 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2692 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2693 handle->xrun[0] = false;
2694 }
2695 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2696 status |= RTAUDIO_INPUT_OVERFLOW;
2697 handle->xrun[1] = false;
2698 }
2699 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2700 stream_.bufferSize, streamTime, status, info->userData );
2701 if ( cbReturnValue == 2 ) {
2702 stream_.state = STREAM_STOPPING;
2703 handle->drainCounter = 2;
2704 ThreadHandle id;
2705 pthread_create( &id, NULL, jackStopStream, info );
2706 return SUCCESS;
2707 }
2708 else if ( cbReturnValue == 1 ) {
2709 handle->drainCounter = 1;
2710 handle->internalDrain = true;
2711 }
2712 }
2713
2714 jack_default_audio_sample_t *jackbuffer;
2715 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2716 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2717
2718 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2719
2720 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2721 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2722 memset( jackbuffer, 0, bufferBytes );
2723 }
2724
2725 }
2726 else if ( stream_.doConvertBuffer[0] ) {
2727
2728 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2729
2730 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2731 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2732 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2733 }
2734 }
2735 else { // no buffer conversion
2736 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2737 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2738 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2739 }
2740 }
2741 }
2742
2743 // Don't bother draining input
2744 if ( handle->drainCounter ) {
2745 handle->drainCounter++;
2746 goto unlock;
2747 }
2748
2749 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2750
2751 if ( stream_.doConvertBuffer[1] ) {
2752 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2755 }
2756 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2757 }
2758 else { // no buffer conversion
2759 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2760 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2761 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2762 }
2763 }
2764 }
2765
2766 unlock:
2767 RtApi::tickStreamTime();
2768 return SUCCESS;
2769 }
2770 //******************** End of __UNIX_JACK__ *********************//
2771 #endif
2772
2773 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2774
2775 // The ASIO API is designed around a callback scheme, so this
2776 // implementation is similar to that used for OS-X CoreAudio and Linux
2777 // Jack. The primary constraint with ASIO is that it only allows
2778 // access to a single driver at a time. Thus, it is not possible to
2779 // have more than one simultaneous RtAudio stream.
2780 //
2781 // This implementation also requires a number of external ASIO files
2782 // and a few global variables. The ASIO callback scheme does not
2783 // allow for the passing of user data, so we must create a global
2784 // pointer to our callbackInfo structure.
2785 //
2786 // On unix systems, we make use of a pthread condition variable.
2787 // Since there is no equivalent in Windows, I hacked something based
2788 // on information found in
2789 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2790
2791 #include "asiosys.h"
2792 #include "asio.h"
2793 #include "iasiothiscallresolver.h"
2794 #include "asiodrivers.h"
2795 #include <cmath>
2796
// File-scope ASIO state. The ASIO callback scheme cannot carry a user-data
// pointer (see the section comment above), so the driver list, callback
// table, driver info, and a pointer to the active stream's CallbackInfo
// are kept in globals — only one ASIO stream can exist at a time.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;   // set when the driver reports an over/underrun
2802
2803 struct AsioHandle {
2804 int drainCounter; // Tracks callback counts when draining
2805 bool internalDrain; // Indicates if stop is initiated from callback or not.
2806 ASIOBufferInfo *bufferInfos;
2807 HANDLE condition;
2808
2809 AsioHandle()
2810 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2811 };
2812
2813 // Function declarations (definitions at end of section)
2814 static const char* getAsioErrorString( ASIOError result );
2815 static void sampleRateChanged( ASIOSampleRate sRate );
2816 static long asioMessages( long selector, long value, void* message, double* opt );
2817
2818 RtApiAsio :: RtApiAsio()
2819 {
2820 // ASIO cannot run on a multi-threaded appartment. You can call
2821 // CoInitialize beforehand, but it must be for appartment threading
2822 // (in which case, CoInitilialize will return S_FALSE here).
2823 coInitialized_ = false;
2824 HRESULT hr = CoInitialize( NULL );
2825 if ( FAILED(hr) ) {
2826 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2827 error( RtAudioError::WARNING );
2828 }
2829 coInitialized_ = true;
2830
2831 drivers.removeCurrentDriver();
2832 driverInfo.asioVersion = 2;
2833
2834 // See note in DirectSound implementation about GetDesktopWindow().
2835 driverInfo.sysRef = GetForegroundWindow();
2836 }
2837
RtApiAsio :: ~RtApiAsio()
{
  // Close any open stream first (releases ASIO buffers and the driver),
  // then balance the CoInitialize() made in the constructor.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
2843
2844 unsigned int RtApiAsio :: getDeviceCount( void )
2845 {
2846 return (unsigned int) drivers.asioGetNumDev();
2847 }
2848
2849 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2850 {
2851 RtAudio::DeviceInfo info;
2852 info.probed = false;
2853
2854 // Get device ID
2855 unsigned int nDevices = getDeviceCount();
2856 if ( nDevices == 0 ) {
2857 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2858 error( RtAudioError::INVALID_USE );
2859 return info;
2860 }
2861
2862 if ( device >= nDevices ) {
2863 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2864 error( RtAudioError::INVALID_USE );
2865 return info;
2866 }
2867
2868 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2869 if ( stream_.state != STREAM_CLOSED ) {
2870 if ( device >= devices_.size() ) {
2871 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2872 error( RtAudioError::WARNING );
2873 return info;
2874 }
2875 return devices_[ device ];
2876 }
2877
2878 char driverName[32];
2879 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2880 if ( result != ASE_OK ) {
2881 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2882 errorText_ = errorStream_.str();
2883 error( RtAudioError::WARNING );
2884 return info;
2885 }
2886
2887 info.name = driverName;
2888
2889 if ( !drivers.loadDriver( driverName ) ) {
2890 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2891 errorText_ = errorStream_.str();
2892 error( RtAudioError::WARNING );
2893 return info;
2894 }
2895
2896 result = ASIOInit( &driverInfo );
2897 if ( result != ASE_OK ) {
2898 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2899 errorText_ = errorStream_.str();
2900 error( RtAudioError::WARNING );
2901 return info;
2902 }
2903
2904 // Determine the device channel information.
2905 long inputChannels, outputChannels;
2906 result = ASIOGetChannels( &inputChannels, &outputChannels );
2907 if ( result != ASE_OK ) {
2908 drivers.removeCurrentDriver();
2909 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2910 errorText_ = errorStream_.str();
2911 error( RtAudioError::WARNING );
2912 return info;
2913 }
2914
2915 info.outputChannels = outputChannels;
2916 info.inputChannels = inputChannels;
2917 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2918 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2919
2920 // Determine the supported sample rates.
2921 info.sampleRates.clear();
2922 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2923 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2924 if ( result == ASE_OK ) {
2925 info.sampleRates.push_back( SAMPLE_RATES[i] );
2926
2927 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2928 info.preferredSampleRate = SAMPLE_RATES[i];
2929 }
2930 }
2931
2932 // Determine supported data types ... just check first channel and assume rest are the same.
2933 ASIOChannelInfo channelInfo;
2934 channelInfo.channel = 0;
2935 channelInfo.isInput = true;
2936 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2937 result = ASIOGetChannelInfo( &channelInfo );
2938 if ( result != ASE_OK ) {
2939 drivers.removeCurrentDriver();
2940 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2941 errorText_ = errorStream_.str();
2942 error( RtAudioError::WARNING );
2943 return info;
2944 }
2945
2946 info.nativeFormats = 0;
2947 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2948 info.nativeFormats |= RTAUDIO_SINT16;
2949 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2950 info.nativeFormats |= RTAUDIO_SINT32;
2951 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2952 info.nativeFormats |= RTAUDIO_FLOAT32;
2953 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2954 info.nativeFormats |= RTAUDIO_FLOAT64;
2955 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2956 info.nativeFormats |= RTAUDIO_SINT24;
2957
2958 if ( info.outputChannels > 0 )
2959 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2960 if ( info.inputChannels > 0 )
2961 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2962
2963 info.probed = true;
2964 drivers.removeCurrentDriver();
2965 return info;
2966 }
2967
2968 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2969 {
2970 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2971 object->callbackEvent( index );
2972 }
2973
2974 void RtApiAsio :: saveDeviceInfo( void )
2975 {
2976 devices_.clear();
2977
2978 unsigned int nDevices = getDeviceCount();
2979 devices_.resize( nDevices );
2980 for ( unsigned int i=0; i<nDevices; i++ )
2981 devices_[i] = getDeviceInfo( i );
2982 }
2983
// Open one direction (OUTPUT or INPUT) of an ASIO stream on the given
// device. For duplex operation RtApi calls this twice — output first —
// and the second (input) call must use the same device, since an ASIO
// duplex stream runs on a single driver. On failure, errorText_ is set,
// partially-created state is torn down at the `error` label (for the
// non-duplex-input case), and FAILURE is returned.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // True when this call opens the input half of an already-open output stream.
  bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;

  // For ASIO, a duplex stream MUST use the same driver.
  if ( isDuplexInput && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
    return FAILURE;
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Only load the driver once for duplex stream.
  if ( !isDuplexInput ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time. Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
  bool buffersAllocated = false;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  unsigned int nChannels;


  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();
    goto error;
  }
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Get the current sample rate
  ASIOSampleRate currentRate;
  result = ASIOGetSampleRate( &currentRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();
      goto error;
    }
  }

  // Determine the driver data type.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Assuming WINDOWS host is always little-endian.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
  }

  if ( stream_.deviceFormat[mode] == 0 ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Set the buffer size. For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  // should be ok.
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();
    goto error;
  }

  if ( isDuplexInput ) {
    // When this is the duplex input (output was opened before), then we have to use the same
    // buffersize as the output, because it might use the preferred buffer size, which most
    // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
    // so instead of throwing an error, make them equal. The caller uses the reference
    // to the "bufferSize" param as usual to set up processing buffers.

    *bufferSize = stream_.bufferSize;

  } else {
    if ( *bufferSize == 0 ) *bufferSize = preferSize;
    else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
    else if ( granularity == -1 ) {
      // Make sure bufferSize is a power of two.
      // Find the highest set bit of minSize and maxSize, then pick the
      // power of two in that range closest to the requested size.
      int log2_of_min_size = 0;
      int log2_of_max_size = 0;

      for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
        if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
        if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
      }

      long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
      int min_delta_num = log2_of_min_size;

      for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
        long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
        if (current_delta < min_delta) {
          min_delta = current_delta;
          min_delta_num = i;
        }
      }

      *bufferSize = ( (unsigned int)1 << min_delta_num );
      if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
      else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
    }
    else if ( granularity != 0 ) {
      // Set to an even multiple of granularity, rounding up.
      *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
    }
  }

  /*
  // we don't use it anymore, see above!
  // Just left it here for the case...
  if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
    goto error;
  }
  */

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  if ( handle == 0 ) {
    try {
      handle = new AsioHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
      goto error;
    }
    handle->bufferInfos = 0;

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
  }

  // Create the ASIO internal buffers. Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );
  }

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  unsigned int i;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Output channels first, then input channels, matching how callbackEvent
  // indexes into the bufferInfos array.
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;
  }
  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;
  }

  // prepare for callbacks
  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.mode = isDuplexInput ? DUPLEX : mode;

  // store this class instance before registering callbacks, that are going to use it
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
    // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
    // In that case, let's be naive and try that instead.
    *bufferSize = preferSize;
    stream_.bufferSize = *bufferSize;
    result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  }

  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();
    goto error;
  }
  buffersAllocated = true;
  stream_.state = STREAM_STOPPED;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    // Reuse the existing device buffer for duplex input if the output
    // direction already allocated one large enough.
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( isDuplexInput && stream_.deviceBuffer ) {
      unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
      if ( bufferBytes <= bytesOut ) makeBuffer = false;
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  // Determine device latencies
  long inputLatency, outputLatency;
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail
  }
  else {
    stream_.latency[0] = outputLatency;
    stream_.latency[1] = inputLatency;
  }

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  return SUCCESS;

 error:
  if ( !isDuplexInput ) {
    // the cleanup for error in the duplex input, is done by RtApi::openStream
    // So we clean up for single channel only

    if ( buffersAllocated )
      ASIODisposeBuffers();

    drivers.removeCurrentDriver();

    if ( handle ) {
      CloseHandle( handle->condition );
      if ( handle->bufferInfos )
        free( handle->bufferInfos );

      delete handle;
      stream_.apiHandle = 0;
    }


    if ( stream_.userBuffer[mode] ) {
      free( stream_.userBuffer[mode] );
      stream_.userBuffer[mode] = 0;
    }

    if ( stream_.deviceBuffer ) {
      free( stream_.deviceBuffer );
      stream_.deviceBuffer = 0;
    }
  }

  return FAILURE;
}
3368
3369 void RtApiAsio :: closeStream()
3370 {
3371 if ( stream_.state == STREAM_CLOSED ) {
3372 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3373 error( RtAudioError::WARNING );
3374 return;
3375 }
3376
3377 if ( stream_.state == STREAM_RUNNING ) {
3378 stream_.state = STREAM_STOPPED;
3379 ASIOStop();
3380 }
3381 ASIODisposeBuffers();
3382 drivers.removeCurrentDriver();
3383
3384 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3385 if ( handle ) {
3386 CloseHandle( handle->condition );
3387 if ( handle->bufferInfos )
3388 free( handle->bufferInfos );
3389 delete handle;
3390 stream_.apiHandle = 0;
3391 }
3392
3393 for ( int i=0; i<2; i++ ) {
3394 if ( stream_.userBuffer[i] ) {
3395 free( stream_.userBuffer[i] );
3396 stream_.userBuffer[i] = 0;
3397 }
3398 }
3399
3400 if ( stream_.deviceBuffer ) {
3401 free( stream_.deviceBuffer );
3402 stream_.deviceBuffer = 0;
3403 }
3404
3405 stream_.mode = UNINITIALIZED;
3406 stream_.state = STREAM_CLOSED;
3407 }
3408
// File-scope flag cleared by RtApiAsio::startStream() below.
// NOTE(review): no code in this excerpt ever sets it true — presumably it
// guards against re-entrant stop requests elsewhere in the file; confirm.
bool stopThreadCalled = false;
3410
// Start a previously opened ASIO stream.  On success the drain state is
// reset and the stream enters STREAM_RUNNING; if ASIOStart() fails, a
// SYSTEM_ERROR is reported and the stream state is left unchanged.
void RtApiAsio :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  // Record the start time so getStreamTime() has a reference point.
  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();
    goto unlock; // skip the running-state setup; error reported below
  }

  // Fresh run: no drain in progress, condition event unsignaled.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;
  asioXRun = false;

 unlock:
  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
3444
// Stop the ASIO stream.  For output/duplex streams that are not already
// draining, a two-buffer drain is requested and this call blocks until the
// callback signals the drain is complete, so queued output is played out.
void RtApiAsio :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter == 0 means no drain is in progress yet; request one and
    // wait for callbackEvent() to SetEvent() when it finishes.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }
  }

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();
  }

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
3473
3474 void RtApiAsio :: abortStream()
3475 {
3476 verifyStream();
3477 if ( stream_.state == STREAM_STOPPED ) {
3478 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3479 error( RtAudioError::WARNING );
3480 return;
3481 }
3482
3483 // The following lines were commented-out because some behavior was
3484 // noted where the device buffers need to be zeroed to avoid
3485 // continuing sound, even when the device buffers are completely
3486 // disposed. So now, calling abort is the same as calling stop.
3487 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 // handle->drainCounter = 2;
3489 stopStream();
3490 }
3491
3492 // This function will be called by a spawned thread when the user
3493 // callback function signals that the stream should be stopped or
3494 // aborted. It is necessary to handle it this way because the
3495 // callbackEvent() function must return before the ASIOStop()
3496 // function will return.
3497 static unsigned __stdcall asioStopStream( void *ptr )
3498 {
3499 CallbackInfo *info = (CallbackInfo *) ptr;
3500 RtApiAsio *object = (RtApiAsio *) info->object;
3501
3502 object->stopStream();
3503 _endthreadex( 0 );
3504 return 0;
3505 }
3506
// Handle one ASIO buffer-switch notification for buffer half 'bufferIndex'.
// Invokes the user callback, performs format conversion / byte swapping as
// configured, and copies audio between the user buffers and the driver's
// per-channel buffer halves.  Returns SUCCESS normally; FAILURE only when
// called on a closed stream.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // External drain (stopStream() is blocked on this event).
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort: stop via helper thread.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a stop after draining queued output.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per single-channel device buffer (ASIO buffers are per channel).
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user format/interleaving into the device buffer, then
      // de-interleave channel by channel into the driver's buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Copy each input channel directly into the (channel-blocked) user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3660
3661 static void sampleRateChanged( ASIOSampleRate sRate )
3662 {
3663 // The ASIO documentation says that this usually only happens during
3664 // external sync. Audio processing is not stopped by the driver,
3665 // actual sample rate might not have even changed, maybe only the
3666 // sample rate status of an AES/EBU or S/PDIF digital input at the
3667 // audio device.
3668
3669 RtApi *object = (RtApi *) asioCallbackInfo->object;
3670 try {
3671 object->stopStream();
3672 }
3673 catch ( RtAudioError &exception ) {
3674 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3675 return;
3676 }
3677
3678 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3679 }
3680
3681 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3682 {
3683 long ret = 0;
3684
3685 switch( selector ) {
3686 case kAsioSelectorSupported:
3687 if ( value == kAsioResetRequest
3688 || value == kAsioEngineVersion
3689 || value == kAsioResyncRequest
3690 || value == kAsioLatenciesChanged
3691 // The following three were added for ASIO 2.0, you don't
3692 // necessarily have to support them.
3693 || value == kAsioSupportsTimeInfo
3694 || value == kAsioSupportsTimeCode
3695 || value == kAsioSupportsInputMonitor)
3696 ret = 1L;
3697 break;
3698 case kAsioResetRequest:
3699 // Defer the task and perform the reset of the driver during the
3700 // next "safe" situation. You cannot reset the driver right now,
3701 // as this code is called from the driver. Reset the driver is
3702 // done by completely destruct is. I.e. ASIOStop(),
3703 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3704 // driver again.
3705 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3706 ret = 1L;
3707 break;
3708 case kAsioResyncRequest:
3709 // This informs the application that the driver encountered some
3710 // non-fatal data loss. It is used for synchronization purposes
3711 // of different media. Added mainly to work around the Win16Mutex
3712 // problems in Windows 95/98 with the Windows Multimedia system,
3713 // which could lose data because the Mutex was held too long by
3714 // another thread. However a driver can issue it in other
3715 // situations, too.
3716 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3717 asioXRun = true;
3718 ret = 1L;
3719 break;
3720 case kAsioLatenciesChanged:
3721 // This will inform the host application that the drivers were
3722 // latencies changed. Beware, it this does not mean that the
3723 // buffer sizes have changed! You might need to update internal
3724 // delay data.
3725 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3726 ret = 1L;
3727 break;
3728 case kAsioEngineVersion:
3729 // Return the supported ASIO version of the host application. If
3730 // a host application does not implement this selector, ASIO 1.0
3731 // is assumed by the driver.
3732 ret = 2L;
3733 break;
3734 case kAsioSupportsTimeInfo:
3735 // Informs the driver whether the
3736 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3737 // For compatibility with ASIO 1.0 drivers the host application
3738 // should always support the "old" bufferSwitch method, too.
3739 ret = 0;
3740 break;
3741 case kAsioSupportsTimeCode:
3742 // Informs the driver whether application is interested in time
3743 // code info. If an application does not need to know about time
3744 // code, the driver has less work to do.
3745 ret = 0;
3746 break;
3747 }
3748 return ret;
3749 }
3750
3751 static const char* getAsioErrorString( ASIOError result )
3752 {
3753 struct Messages
3754 {
3755 ASIOError value;
3756 const char*message;
3757 };
3758
3759 static const Messages m[] =
3760 {
3761 { ASE_NotPresent, "Hardware input or output is not present or available." },
3762 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3763 { ASE_InvalidParameter, "Invalid input parameter." },
3764 { ASE_InvalidMode, "Invalid mode." },
3765 { ASE_SPNotAdvancing, "Sample position not advancing." },
3766 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3767 { ASE_NoMemory, "Not enough memory to complete the request." }
3768 };
3769
3770 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3771 if ( m[i].value == result ) return m[i].message;
3772
3773 return "Unknown error.";
3774 }
3775
3776 //******************** End of __WINDOWS_ASIO__ *********************//
3777 #endif
3778
3779
3780 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3781
3782 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3783 // - Introduces support for the Windows WASAPI API
3784 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3785 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3786 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3787
3788 #ifndef INITGUID
3789 #define INITGUID
3790 #endif
3791
3792 #include <mfapi.h>
3793 #include <mferror.h>
3794 #include <mfplay.h>
3795 #include <mftransform.h>
3796 #include <wmcodecdsp.h>
3797
3798 #include <audioclient.h>
3799 #include <avrt.h>
3800 #include <mmdeviceapi.h>
3801 #include <functiondiscoverykeys_devpkey.h>
3802
3803 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3804 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3805 #endif
3806
3807 #ifndef MFSTARTUP_NOSOCKET
3808 #define MFSTARTUP_NOSOCKET 0x1
3809 #endif
3810
3811 #ifdef _MSC_VER
3812 #pragma comment( lib, "ksuser" )
3813 #pragma comment( lib, "mfplat.lib" )
3814 #pragma comment( lib, "mfuuid.lib" )
3815 #pragma comment( lib, "wmcodecdspuuid" )
3816 #endif
3817
3818 //=============================================================================
3819
// Release a COM interface pointer (when non-NULL) and reset it to NULL.
// The body is wrapped in do { } while ( 0 ) so the macro expands to a
// single statement and behaves correctly in unbraced if/else constructs;
// the previous bare-if form attached a following `else` to the macro's
// internal `if`.
#define SAFE_RELEASE( objectPtr )\
do\
{\
  if ( objectPtr )\
  {\
    objectPtr->Release();\
    objectPtr = NULL;\
  }\
} while ( 0 )
3826
// Function-pointer type matching AvSetMmThreadCharacteristics() from avrt.h.
// NOTE(review): presumably used to resolve the function dynamically at run
// time for older-Windows compatibility — confirm at the call site.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3828
3829 //-----------------------------------------------------------------------------
3830
3831 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3832 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3833 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3834 // provide intermediate storage for read / write synchronization.
// Single-producer/single-consumer ring buffer of audio samples.
// All sizes and indices are in SAMPLES (not bytes); the element width is
// fixed by the format passed to push/pull, which must match the formatBytes
// given to setBufferSize().
class WasapiBuffer
{
public:
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}

  ~WasapiBuffer() {
    free( buffer_ );
  }

  // sets the length of the internal ring buffer
  // bufferSize: capacity in samples; formatBytes: bytes per sample.
  // Any previously held data is discarded and the indices reset.
  // NOTE(review): the calloc() result is not checked for failure.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    free( buffer_ );

    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }

  // attempt to push a buffer into the ring buffer at the current "in" index
  // Returns false (without copying anything) when the input is invalid or
  // there is not enough free space; returns true after a complete copy.
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "out" index so the overlap test below works across the
    // ring's wrap-around point.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;
    }

    // the "IN" index CAN BEGIN at the "OUT" index
    // the "IN" index CANNOT END at the "OUT" index
    if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
      return false; // not enough space between "in" index and "out" index
    }

    // copy buffer from external to internal
    // fromZeroSize: samples that wrap to the start of the ring;
    // fromInSize: samples written at the current "in" position.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // The switch scales the sample indices to byte offsets for each width.
    switch( format )
      {
      case RTAUDIO_SINT8:
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
        break;
    }

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;

    return true;
  }

  // attempt to pull a buffer from the ring buffer from the current "out" index
  // Mirror image of pushBuffer(): returns false when invalid or when fewer
  // than bufferSize samples are available; true after a complete copy.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index across the ring's wrap-around point.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // the "OUT" index CANNOT BEGIN at the "IN" index
    // the "OUT" index CAN END at the "IN" index
    if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough space between "out" index and "in" index
    }

    // copy buffer from internal to external
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }

private:
  char* buffer_;              // ring storage (bufferSize_ * formatBytes bytes)
  unsigned int bufferSize_;   // capacity in samples
  unsigned int inIndex_;      // next write position (samples)
  unsigned int outIndex_;     // next read position (samples)
};
3989
3990 //-----------------------------------------------------------------------------
3991
3992 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3993 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3994 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Sample-rate converter built on the Media Foundation resampler DSP
// (CLSID_CResamplerMediaObject).  One instance handles a fixed sample
// format / channel count / rate pair; Convert() is then called repeatedly
// during the stream callback loop.
// NOTE(review): HRESULTs from the COM/MF calls below are not checked;
// a failed CoCreateInstance would crash on the QueryInterface call.
class WasapiResampler
{
public:
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    , _resamplerProps( NULL )
    #endif
  {
    // 1. Initialization

    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object

    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
    _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
    #endif

    // 3. Specify input / output format
    // A template media type is filled once, then copied for the input and
    // output sides; only the sample rate fields differ on the output.

    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
  }

  ~WasapiResampler()
  {
    // 8. Send stream stop messages to Resampler

    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // 9. Cleanup

    MFShutdown();

    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

    #ifdef __IWMResamplerProps_FWD_DEFINED__
    SAFE_RELEASE( _resamplerProps );
    #endif
  }

  // Resample inSampleCount frames from inBuffer into outBuffer.
  // outSampleCount receives the number of frames actually produced (0 when
  // the transform needs more input).  maxOutSampleCount, when not -1, caps
  // the output buffer size in frames; otherwise the size is estimated from
  // the sample ratio plus one frame of headroom.
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
  {
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    if ( _sampleRatio == 1 )
    {
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;
      return;
    }

    unsigned int outputBufferSize = 0;
    if ( maxOutSampleCount != -1 )
    {
      outputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
    }
    else
    {
      outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
    }

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data

    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler

    _transform->ProcessInput( 0, rInSample, 0 );

    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion

    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
    DWORD rStatus;
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data

    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler

    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
    {
      outSampleCount = 0;
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );
      return;
    }

    // 7.3 Write output data to outBuffer

    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes );

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  }

private:
  unsigned int _bytesPerSample;   // bytes per single sample (bitsPerSample / 8)
  unsigned int _channelCount;     // interleaved channels per frame
  float _sampleRatio;             // outSampleRate / inSampleRate

  IUnknown* _transformUnk;        // resampler DSP as IUnknown
  IMFTransform* _transform;       // resampler DSP transform interface
  IMFMediaType* _mediaType;       // shared template media type
  IMFMediaType* _inputMediaType;  // input-side copy of the template
  IMFMediaType* _outputMediaType; // output-side copy (different sample rate)

  #ifdef __IWMResamplerProps_FWD_DEFINED__
  IWMResamplerProps* _resamplerProps; // quality tuning (half filter length)
  #endif
};
4189
4190 //-----------------------------------------------------------------------------
4191
4192 // A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  IAudioClient* captureAudioClient;   // audio client for the capture endpoint
  IAudioClient* renderAudioClient;    // audio client for the render endpoint
  IAudioCaptureClient* captureClient; // capture packet service on captureAudioClient
  IAudioRenderClient* renderClient;   // render buffer service on renderAudioClient
  HANDLE captureEvent;                // event signaled when capture data is ready
  HANDLE renderEvent;                 // event signaled when render buffer is free

  // All members start NULL; they are populated as the stream is opened.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
4210
4211 //=============================================================================
4212
// Initialize COM for this thread and create the MMDevice enumerator used
// by all device queries.  Failure to create the enumerator is tolerated
// (deviceEnumerator_ stays NULL and getDeviceCount() reports 0 devices).
RtApiWasapi::RtApiWasapi()
  : coInitialized_( false ), deviceEnumerator_( NULL )
{
  // WASAPI can run either apartment or multi-threaded
  HRESULT hr = CoInitialize( NULL );
  // Remember whether we initialized COM so the destructor can balance it.
  if ( !FAILED( hr ) )
    coInitialized_ = true;

  // Instantiate device enumerator
  hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
                         CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
                         ( void** ) &deviceEnumerator_ );

  // If this runs on an old Windows, it will fail. Ignore and proceed.
  if ( FAILED( hr ) )
    deviceEnumerator_ = NULL;
}
4230
4231 //-----------------------------------------------------------------------------
4232
// Close any stream still open, release the device enumerator, and balance
// the constructor's CoInitialize() when it succeeded.
RtApiWasapi::~RtApiWasapi()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
4244
4245 //=============================================================================
4246
// Return the total number of active WASAPI endpoints (capture + render).
// Returns 0 when no device enumerator is available or, after reporting a
// DRIVER_ERROR, when any enumeration step fails.
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // Constructor could not create the enumerator (e.g. pre-Vista Windows).
  if ( !deviceEnumerator_ )
    return 0;

  // Count capture devices
  // errorText_ doubles as the success flag for the goto-Exit cleanup below.
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4296
4297 //-----------------------------------------------------------------------------
4298
4299 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4300 {
4301 RtAudio::DeviceInfo info;
4302 unsigned int captureDeviceCount = 0;
4303 unsigned int renderDeviceCount = 0;
4304 std::string defaultDeviceName;
4305 bool isCaptureDevice = false;
4306
4307 PROPVARIANT deviceNameProp;
4308 PROPVARIANT defaultDeviceNameProp;
4309
4310 IMMDeviceCollection* captureDevices = NULL;
4311 IMMDeviceCollection* renderDevices = NULL;
4312 IMMDevice* devicePtr = NULL;
4313 IMMDevice* defaultDevicePtr = NULL;
4314 IAudioClient* audioClient = NULL;
4315 IPropertyStore* devicePropStore = NULL;
4316 IPropertyStore* defaultDevicePropStore = NULL;
4317
4318 WAVEFORMATEX* deviceFormat = NULL;
4319 WAVEFORMATEX* closestMatchFormat = NULL;
4320
4321 // probed
4322 info.probed = false;
4323
4324 // Count capture devices
4325 errorText_.clear();
4326 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4327 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4330 goto Exit;
4331 }
4332
4333 hr = captureDevices->GetCount( &captureDeviceCount );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4336 goto Exit;
4337 }
4338
4339 // Count render devices
4340 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4341 if ( FAILED( hr ) ) {
4342 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4343 goto Exit;
4344 }
4345
4346 hr = renderDevices->GetCount( &renderDeviceCount );
4347 if ( FAILED( hr ) ) {
4348 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4349 goto Exit;
4350 }
4351
4352 // validate device index
4353 if ( device >= captureDeviceCount + renderDeviceCount ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4355 errorType = RtAudioError::INVALID_USE;
4356 goto Exit;
4357 }
4358
4359 // determine whether index falls within capture or render devices
4360 if ( device >= renderDeviceCount ) {
4361 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4362 if ( FAILED( hr ) ) {
4363 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4364 goto Exit;
4365 }
4366 isCaptureDevice = true;
4367 }
4368 else {
4369 hr = renderDevices->Item( device, &devicePtr );
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4372 goto Exit;
4373 }
4374 isCaptureDevice = false;
4375 }
4376
4377 // get default device name
4378 if ( isCaptureDevice ) {
4379 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4382 goto Exit;
4383 }
4384 }
4385 else {
4386 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4389 goto Exit;
4390 }
4391 }
4392
4393 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4394 if ( FAILED( hr ) ) {
4395 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4396 goto Exit;
4397 }
4398 PropVariantInit( &defaultDeviceNameProp );
4399
4400 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4403 goto Exit;
4404 }
4405
4406 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4407
4408 // name
4409 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4412 goto Exit;
4413 }
4414
4415 PropVariantInit( &deviceNameProp );
4416
4417 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4418 if ( FAILED( hr ) ) {
4419 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4420 goto Exit;
4421 }
4422
4423 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4424
4425 // is default
4426 if ( isCaptureDevice ) {
4427 info.isDefaultInput = info.name == defaultDeviceName;
4428 info.isDefaultOutput = false;
4429 }
4430 else {
4431 info.isDefaultInput = false;
4432 info.isDefaultOutput = info.name == defaultDeviceName;
4433 }
4434
4435 // channel count
4436 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4439 goto Exit;
4440 }
4441
4442 hr = audioClient->GetMixFormat( &deviceFormat );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4445 goto Exit;
4446 }
4447
4448 if ( isCaptureDevice ) {
4449 info.inputChannels = deviceFormat->nChannels;
4450 info.outputChannels = 0;
4451 info.duplexChannels = 0;
4452 }
4453 else {
4454 info.inputChannels = 0;
4455 info.outputChannels = deviceFormat->nChannels;
4456 info.duplexChannels = 0;
4457 }
4458
4459 // sample rates
4460 info.sampleRates.clear();
4461
4462 // allow support for all sample rates as we have a built-in sample rate converter
4463 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4464 info.sampleRates.push_back( SAMPLE_RATES[i] );
4465 }
4466 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4467
4468 // native format
4469 info.nativeFormats = 0;
4470
4471 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4472 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4473 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4474 {
4475 if ( deviceFormat->wBitsPerSample == 32 ) {
4476 info.nativeFormats |= RTAUDIO_FLOAT32;
4477 }
4478 else if ( deviceFormat->wBitsPerSample == 64 ) {
4479 info.nativeFormats |= RTAUDIO_FLOAT64;
4480 }
4481 }
4482 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4483 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4484 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4485 {
4486 if ( deviceFormat->wBitsPerSample == 8 ) {
4487 info.nativeFormats |= RTAUDIO_SINT8;
4488 }
4489 else if ( deviceFormat->wBitsPerSample == 16 ) {
4490 info.nativeFormats |= RTAUDIO_SINT16;
4491 }
4492 else if ( deviceFormat->wBitsPerSample == 24 ) {
4493 info.nativeFormats |= RTAUDIO_SINT24;
4494 }
4495 else if ( deviceFormat->wBitsPerSample == 32 ) {
4496 info.nativeFormats |= RTAUDIO_SINT32;
4497 }
4498 }
4499
4500 // probed
4501 info.probed = true;
4502
4503 Exit:
4504 // release all references
4505 PropVariantClear( &deviceNameProp );
4506 PropVariantClear( &defaultDeviceNameProp );
4507
4508 SAFE_RELEASE( captureDevices );
4509 SAFE_RELEASE( renderDevices );
4510 SAFE_RELEASE( devicePtr );
4511 SAFE_RELEASE( defaultDevicePtr );
4512 SAFE_RELEASE( audioClient );
4513 SAFE_RELEASE( devicePropStore );
4514 SAFE_RELEASE( defaultDevicePropStore );
4515
4516 CoTaskMemFree( deviceFormat );
4517 CoTaskMemFree( closestMatchFormat );
4518
4519 if ( !errorText_.empty() )
4520 error( errorType );
4521 return info;
4522 }
4523
4524 //-----------------------------------------------------------------------------
4525
4526 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4527 {
4528 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4529 if ( getDeviceInfo( i ).isDefaultOutput ) {
4530 return i;
4531 }
4532 }
4533
4534 return 0;
4535 }
4536
4537 //-----------------------------------------------------------------------------
4538
4539 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4540 {
4541 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4542 if ( getDeviceInfo( i ).isDefaultInput ) {
4543 return i;
4544 }
4545 }
4546
4547 return 0;
4548 }
4549
4550 //-----------------------------------------------------------------------------
4551
4552 void RtApiWasapi::closeStream( void )
4553 {
4554 if ( stream_.state == STREAM_CLOSED ) {
4555 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4556 error( RtAudioError::WARNING );
4557 return;
4558 }
4559
4560 if ( stream_.state != STREAM_STOPPED )
4561 stopStream();
4562
4563 // clean up stream memory
4564 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4565 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4566
4567 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4568 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4569
4570 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4571 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4572
4573 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4574 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4575
4576 delete ( WasapiHandle* ) stream_.apiHandle;
4577 stream_.apiHandle = NULL;
4578
4579 for ( int i = 0; i < 2; i++ ) {
4580 if ( stream_.userBuffer[i] ) {
4581 free( stream_.userBuffer[i] );
4582 stream_.userBuffer[i] = 0;
4583 }
4584 }
4585
4586 if ( stream_.deviceBuffer ) {
4587 free( stream_.deviceBuffer );
4588 stream_.deviceBuffer = 0;
4589 }
4590
4591 // update stream state
4592 stream_.state = STREAM_CLOSED;
4593 }
4594
4595 //-----------------------------------------------------------------------------
4596
4597 void RtApiWasapi::startStream( void )
4598 {
4599 verifyStream();
4600
4601 if ( stream_.state == STREAM_RUNNING ) {
4602 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4603 error( RtAudioError::WARNING );
4604 return;
4605 }
4606
4607 #if defined( HAVE_GETTIMEOFDAY )
4608 gettimeofday( &stream_.lastTickTimestamp, NULL );
4609 #endif
4610
4611 // update stream state
4612 stream_.state = STREAM_RUNNING;
4613
4614 // create WASAPI stream thread
4615 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4616
4617 if ( !stream_.callbackInfo.thread ) {
4618 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4619 error( RtAudioError::THREAD_ERROR );
4620 }
4621 else {
4622 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4623 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4624 }
4625 }
4626
4627 //-----------------------------------------------------------------------------
4628
// Stop the stream: signal the processing thread via the STREAM_STOPPING
// state, wait (bounded) for it to finish, then close its thread handle.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  // (wasapiThread sets it to STREAM_STOPPED once it has drained and exited
  // its processing loop)
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped: sleep at most two buffer periods.
  // NOTE(review): if the thread has not stopped after two periods we fall
  // through and close the handle anyway — closing a thread handle does not
  // terminate the thread, it only releases our reference to it.
  for (int i=0; i < 2 && stream_.state != STREAM_STOPPED; ++i ) {
    Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4656
4657 //-----------------------------------------------------------------------------
4658
// Abort the stream: like stopStream(), but polls until the processing thread
// has actually stopped instead of waiting a bounded number of buffer periods.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped.
  // NOTE(review): this poll is unbounded — if the stream thread never sets
  // STREAM_STOPPED (e.g. it is wedged in a driver call), this loop spins
  // forever at 1 ms per iteration.
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // close thread handle (releases our reference; the thread has already
  // exited its loop by the time we get here)
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4686
4687 //-----------------------------------------------------------------------------
4688
// Open (or extend to duplex) a stream on the given device.
//
// Device indexing convention (shared with getDeviceInfo): indices
// [0, renderDeviceCount) are render devices; indices at or above
// renderDeviceCount are capture devices. Opening a render device with
// mode == INPUT configures WASAPI loopback capture of that device.
//
// \param device       combined render+capture device index.
// \param mode         INPUT or OUTPUT (DUPLEX is reached via two calls).
// \param channels     number of user channels requested.
// \param firstChannel channel offset within the device.
// \param sampleRate   user sample rate (resampled to/from the device rate).
// \param format       user sample format.
// \param bufferSize   in/out: requested/granted buffer size in frames.
// \param options      optional stream options (interleaving, priority, ...).
// \return SUCCESS or FAILURE; on FAILURE the stream is closed and the
//         configured error is reported.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
{
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created (a second call for the other
  // mode of a duplex stream reuses the existing handle)
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
    goto Exit;
  }

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
    goto Exit;
  }

  // if device index falls within capture devices
  if ( device >= renderDeviceCount ) {
    // a true capture endpoint can only be opened for input
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
      goto Exit;
    }

    // retrieve captureAudioClient from devicePtr (stored by reference into
    // the shared WasapiHandle)
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for
  // loopback (capturing what the render device is playing)
  if ( device < renderDeviceCount && mode == INPUT )
  {
    // if renderAudioClient is not initialised, initialise it now: loopback
    // capture requires the render side of the same device to be open.
    // NOTE(review): the recursive call's result is not checked; a failure
    // there is only detected indirectly by the steps below.
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( !renderAudioClient )
    {
      probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
    }

    // retrieve captureAudioClient from devicePtr: the render endpoint is
    // activated a second time and used as the loopback *capture* client
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for output
  if ( device < renderDeviceCount && mode == OUTPUT )
  {
    // if renderAudioClient is already initialised, don't initialise it again
    // (it may have been set up by the loopback branch above)
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( renderAudioClient )
    {
      methodResult = SUCCESS;
      goto Exit;
    }

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // fill stream data: a second probe in the opposite mode upgrades the
  // stream to DUPLEX
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
  }
  else {
    stream_.mode = mode;
  }

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  else
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion: needed whenever user and device differ
  // in sample format, channel count, or (for multi-channel) interleaving.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
       stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, firstChannel );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
    goto Exit;
  }

  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
  else
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

  methodResult = SUCCESS;

Exit:
  // clean up COM references on every path; the audio clients stored in the
  // WasapiHandle are intentionally kept alive for the stream's lifetime
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream (releases the WasapiHandle contents)
  if ( methodResult == FAILURE )
    closeStream();

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
}
4921
4922 //=============================================================================
4923
4924 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4925 {
4926 if ( wasapiPtr )
4927 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4928
4929 return 0;
4930 }
4931
4932 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4933 {
4934 if ( wasapiPtr )
4935 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4936
4937 return 0;
4938 }
4939
4940 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4941 {
4942 if ( wasapiPtr )
4943 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4944
4945 return 0;
4946 }
4947
4948 //-----------------------------------------------------------------------------
4949
4950 void RtApiWasapi::wasapiThread()
4951 {
4952 // as this is a new thread, we must CoInitialize it
4953 CoInitialize( NULL );
4954
4955 HRESULT hr;
4956
4957 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4958 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4959 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4960 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4961 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4962 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4963
4964 WAVEFORMATEX* captureFormat = NULL;
4965 WAVEFORMATEX* renderFormat = NULL;
4966 float captureSrRatio = 0.0f;
4967 float renderSrRatio = 0.0f;
4968 WasapiBuffer captureBuffer;
4969 WasapiBuffer renderBuffer;
4970 WasapiResampler* captureResampler = NULL;
4971 WasapiResampler* renderResampler = NULL;
4972
4973 // declare local stream variables
4974 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4975 BYTE* streamBuffer = NULL;
4976 DWORD captureFlags = 0;
4977 unsigned int bufferFrameCount = 0;
4978 unsigned int numFramesPadding = 0;
4979 unsigned int convBufferSize = 0;
4980 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4981 bool callbackPushed = true;
4982 bool callbackPulled = false;
4983 bool callbackStopped = false;
4984 int callbackResult = 0;
4985
4986 // convBuffer is used to store converted buffers between WASAPI and the user
4987 char* convBuffer = NULL;
4988 unsigned int convBuffSize = 0;
4989 unsigned int deviceBuffSize = 0;
4990
4991 std::string errorText;
4992 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4993
4994 // Attempt to assign "Pro Audio" characteristic to thread
4995 HMODULE AvrtDll = LoadLibraryW( L"AVRT.dll" );
4996 if ( AvrtDll ) {
4997 DWORD taskIndex = 0;
4998 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4999 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5000 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5001 FreeLibrary( AvrtDll );
5002 }
5003
5004 // start capture stream if applicable
5005 if ( captureAudioClient ) {
5006 hr = captureAudioClient->GetMixFormat( &captureFormat );
5007 if ( FAILED( hr ) ) {
5008 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5009 goto Exit;
5010 }
5011
5012 // init captureResampler
5013 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5014 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5015 captureFormat->nSamplesPerSec, stream_.sampleRate );
5016
5017 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5018
5019 if ( !captureClient ) {
5020 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5021 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5022 0,
5023 0,
5024 captureFormat,
5025 NULL );
5026 if ( FAILED( hr ) ) {
5027 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5028 goto Exit;
5029 }
5030
5031 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5032 ( void** ) &captureClient );
5033 if ( FAILED( hr ) ) {
5034 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5035 goto Exit;
5036 }
5037
5038 // don't configure captureEvent if in loopback mode
5039 if ( !loopbackEnabled )
5040 {
5041 // configure captureEvent to trigger on every available capture buffer
5042 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5043 if ( !captureEvent ) {
5044 errorType = RtAudioError::SYSTEM_ERROR;
5045 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5046 goto Exit;
5047 }
5048
5049 hr = captureAudioClient->SetEventHandle( captureEvent );
5050 if ( FAILED( hr ) ) {
5051 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5052 goto Exit;
5053 }
5054
5055 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5056 }
5057
5058 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5059
5060 // reset the capture stream
5061 hr = captureAudioClient->Reset();
5062 if ( FAILED( hr ) ) {
5063 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5064 goto Exit;
5065 }
5066
5067 // start the capture stream
5068 hr = captureAudioClient->Start();
5069 if ( FAILED( hr ) ) {
5070 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5071 goto Exit;
5072 }
5073 }
5074
5075 unsigned int inBufferSize = 0;
5076 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5077 if ( FAILED( hr ) ) {
5078 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5079 goto Exit;
5080 }
5081
5082 // scale outBufferSize according to stream->user sample rate ratio
5083 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5084 inBufferSize *= stream_.nDeviceChannels[INPUT];
5085
5086 // set captureBuffer size
5087 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5088 }
5089
5090 // start render stream if applicable
5091 if ( renderAudioClient ) {
5092 hr = renderAudioClient->GetMixFormat( &renderFormat );
5093 if ( FAILED( hr ) ) {
5094 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5095 goto Exit;
5096 }
5097
5098 // init renderResampler
5099 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5100 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5101 stream_.sampleRate, renderFormat->nSamplesPerSec );
5102
5103 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5104
5105 if ( !renderClient ) {
5106 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5107 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5108 0,
5109 0,
5110 renderFormat,
5111 NULL );
5112 if ( FAILED( hr ) ) {
5113 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5114 goto Exit;
5115 }
5116
5117 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5118 ( void** ) &renderClient );
5119 if ( FAILED( hr ) ) {
5120 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5121 goto Exit;
5122 }
5123
5124 // configure renderEvent to trigger on every available render buffer
5125 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5126 if ( !renderEvent ) {
5127 errorType = RtAudioError::SYSTEM_ERROR;
5128 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5129 goto Exit;
5130 }
5131
5132 hr = renderAudioClient->SetEventHandle( renderEvent );
5133 if ( FAILED( hr ) ) {
5134 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5135 goto Exit;
5136 }
5137
5138 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5139 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5140
5141 // reset the render stream
5142 hr = renderAudioClient->Reset();
5143 if ( FAILED( hr ) ) {
5144 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5145 goto Exit;
5146 }
5147
5148 // start the render stream
5149 hr = renderAudioClient->Start();
5150 if ( FAILED( hr ) ) {
5151 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5152 goto Exit;
5153 }
5154 }
5155
5156 unsigned int outBufferSize = 0;
5157 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5158 if ( FAILED( hr ) ) {
5159 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5160 goto Exit;
5161 }
5162
5163 // scale inBufferSize according to user->stream sample rate ratio
5164 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5165 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5166
5167 // set renderBuffer size
5168 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5169 }
5170
5171 // malloc buffer memory
5172 if ( stream_.mode == INPUT )
5173 {
5174 using namespace std; // for ceilf
5175 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5176 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5177 }
5178 else if ( stream_.mode == OUTPUT )
5179 {
5180 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5181 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5182 }
5183 else if ( stream_.mode == DUPLEX )
5184 {
5185 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5186 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5187 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5188 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5189 }
5190
5191 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5192 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5193 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5194 if ( !convBuffer || !stream_.deviceBuffer ) {
5195 errorType = RtAudioError::MEMORY_ERROR;
5196 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5197 goto Exit;
5198 }
5199
5200 // stream process loop
5201 while ( stream_.state != STREAM_STOPPING ) {
5202 if ( !callbackPulled ) {
5203 // Callback Input
5204 // ==============
5205 // 1. Pull callback buffer from inputBuffer
5206 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5207 // Convert callback buffer to user format
5208
5209 if ( captureAudioClient )
5210 {
5211 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5212
5213 convBufferSize = 0;
5214 while ( convBufferSize < stream_.bufferSize )
5215 {
5216 // Pull callback buffer from inputBuffer
5217 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5218 samplesToPull * stream_.nDeviceChannels[INPUT],
5219 stream_.deviceFormat[INPUT] );
5220
5221 if ( !callbackPulled )
5222 {
5223 break;
5224 }
5225
5226 // Convert callback buffer to user sample rate
5227 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5228 unsigned int convSamples = 0;
5229
5230 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5231 convBuffer,
5232 samplesToPull,
5233 convSamples,
5234 convBufferSize == 0 ? -1 : stream_.bufferSize - convBufferSize );
5235
5236 convBufferSize += convSamples;
5237 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5238 }
5239
5240 if ( callbackPulled )
5241 {
5242 if ( stream_.doConvertBuffer[INPUT] ) {
5243 // Convert callback buffer to user format
5244 convertBuffer( stream_.userBuffer[INPUT],
5245 stream_.deviceBuffer,
5246 stream_.convertInfo[INPUT] );
5247 }
5248 else {
5249 // no further conversion, simple copy deviceBuffer to userBuffer
5250 memcpy( stream_.userBuffer[INPUT],
5251 stream_.deviceBuffer,
5252 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5253 }
5254 }
5255 }
5256 else {
5257 // if there is no capture stream, set callbackPulled flag
5258 callbackPulled = true;
5259 }
5260
5261 // Execute Callback
5262 // ================
5263 // 1. Execute user callback method
5264 // 2. Handle return value from callback
5265
5266 // if callback has not requested the stream to stop
5267 if ( callbackPulled && !callbackStopped ) {
5268 // Execute user callback method
5269 callbackResult = callback( stream_.userBuffer[OUTPUT],
5270 stream_.userBuffer[INPUT],
5271 stream_.bufferSize,
5272 getStreamTime(),
5273 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5274 stream_.callbackInfo.userData );
5275
5276 // tick stream time
5277 RtApi::tickStreamTime();
5278
5279 // Handle return value from callback
5280 if ( callbackResult == 1 ) {
5281 // instantiate a thread to stop this thread
5282 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5283 if ( !threadHandle ) {
5284 errorType = RtAudioError::THREAD_ERROR;
5285 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5286 goto Exit;
5287 }
5288 else if ( !CloseHandle( threadHandle ) ) {
5289 errorType = RtAudioError::THREAD_ERROR;
5290 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5291 goto Exit;
5292 }
5293
5294 callbackStopped = true;
5295 }
5296 else if ( callbackResult == 2 ) {
5297 // instantiate a thread to stop this thread
5298 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5299 if ( !threadHandle ) {
5300 errorType = RtAudioError::THREAD_ERROR;
5301 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5302 goto Exit;
5303 }
5304 else if ( !CloseHandle( threadHandle ) ) {
5305 errorType = RtAudioError::THREAD_ERROR;
5306 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5307 goto Exit;
5308 }
5309
5310 callbackStopped = true;
5311 }
5312 }
5313 }
5314
5315 // Callback Output
5316 // ===============
5317 // 1. Convert callback buffer to stream format
5318 // 2. Convert callback buffer to stream sample rate and channel count
5319 // 3. Push callback buffer into outputBuffer
5320
5321 if ( renderAudioClient && callbackPulled )
5322 {
5323 // if the last call to renderBuffer.PushBuffer() was successful
5324 if ( callbackPushed || convBufferSize == 0 )
5325 {
5326 if ( stream_.doConvertBuffer[OUTPUT] )
5327 {
5328 // Convert callback buffer to stream format
5329 convertBuffer( stream_.deviceBuffer,
5330 stream_.userBuffer[OUTPUT],
5331 stream_.convertInfo[OUTPUT] );
5332
5333 }
5334 else {
5335 // no further conversion, simple copy userBuffer to deviceBuffer
5336 memcpy( stream_.deviceBuffer,
5337 stream_.userBuffer[OUTPUT],
5338 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5339 }
5340
5341 // Convert callback buffer to stream sample rate
5342 renderResampler->Convert( convBuffer,
5343 stream_.deviceBuffer,
5344 stream_.bufferSize,
5345 convBufferSize );
5346 }
5347
5348 // Push callback buffer into outputBuffer
5349 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5350 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5351 stream_.deviceFormat[OUTPUT] );
5352 }
5353 else {
5354 // if there is no render stream, set callbackPushed flag
5355 callbackPushed = true;
5356 }
5357
5358 // Stream Capture
5359 // ==============
5360 // 1. Get capture buffer from stream
5361 // 2. Push capture buffer into inputBuffer
5362 // 3. If 2. was successful: Release capture buffer
5363
5364 if ( captureAudioClient ) {
5365 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5366 if ( !callbackPulled ) {
5367 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5368 }
5369
5370 // Get capture buffer from stream
5371 hr = captureClient->GetBuffer( &streamBuffer,
5372 &bufferFrameCount,
5373 &captureFlags, NULL, NULL );
5374 if ( FAILED( hr ) ) {
5375 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5376 goto Exit;
5377 }
5378
5379 if ( bufferFrameCount != 0 ) {
5380 // Push capture buffer into inputBuffer
5381 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5382 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5383 stream_.deviceFormat[INPUT] ) )
5384 {
5385 // Release capture buffer
5386 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5387 if ( FAILED( hr ) ) {
5388 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5389 goto Exit;
5390 }
5391 }
5392 else
5393 {
5394 // Inform WASAPI that capture was unsuccessful
5395 hr = captureClient->ReleaseBuffer( 0 );
5396 if ( FAILED( hr ) ) {
5397 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5398 goto Exit;
5399 }
5400 }
5401 }
5402 else
5403 {
5404 // Inform WASAPI that capture was unsuccessful
5405 hr = captureClient->ReleaseBuffer( 0 );
5406 if ( FAILED( hr ) ) {
5407 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5408 goto Exit;
5409 }
5410 }
5411 }
5412
5413 // Stream Render
5414 // =============
5415 // 1. Get render buffer from stream
5416 // 2. Pull next buffer from outputBuffer
5417 // 3. If 2. was successful: Fill render buffer with next buffer
5418 // Release render buffer
5419
5420 if ( renderAudioClient ) {
5421 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5422 if ( callbackPulled && !callbackPushed ) {
5423 WaitForSingleObject( renderEvent, INFINITE );
5424 }
5425
5426 // Get render buffer from stream
5427 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5430 goto Exit;
5431 }
5432
5433 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5434 if ( FAILED( hr ) ) {
5435 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5436 goto Exit;
5437 }
5438
5439 bufferFrameCount -= numFramesPadding;
5440
5441 if ( bufferFrameCount != 0 ) {
5442 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5443 if ( FAILED( hr ) ) {
5444 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5445 goto Exit;
5446 }
5447
5448 // Pull next buffer from outputBuffer
5449 // Fill render buffer with next buffer
5450 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5451 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5452 stream_.deviceFormat[OUTPUT] ) )
5453 {
5454 // Release render buffer
5455 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5456 if ( FAILED( hr ) ) {
5457 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5458 goto Exit;
5459 }
5460 }
5461 else
5462 {
5463 // Inform WASAPI that render was unsuccessful
5464 hr = renderClient->ReleaseBuffer( 0, 0 );
5465 if ( FAILED( hr ) ) {
5466 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5467 goto Exit;
5468 }
5469 }
5470 }
5471 else
5472 {
5473 // Inform WASAPI that render was unsuccessful
5474 hr = renderClient->ReleaseBuffer( 0, 0 );
5475 if ( FAILED( hr ) ) {
5476 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5477 goto Exit;
5478 }
5479 }
5480 }
5481
5482 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5483 if ( callbackPushed ) {
5484 // unsetting the callbackPulled flag lets the stream know that
5485 // the audio device is ready for another callback output buffer.
5486 callbackPulled = false;
5487 }
5488
5489 }
5490
5491 Exit:
5492 // clean up
5493 CoTaskMemFree( captureFormat );
5494 CoTaskMemFree( renderFormat );
5495
5496 free ( convBuffer );
5497 delete renderResampler;
5498 delete captureResampler;
5499
5500 CoUninitialize();
5501
5502 // update stream state
5503 stream_.state = STREAM_STOPPED;
5504
5505 if ( !errorText.empty() )
5506 {
5507 errorText_ = errorText;
5508 error( errorType );
5509 }
5510 }
5511
5512 //******************** End of __WINDOWS_WASAPI__ *********************//
5513 #endif
5514
5515
5516 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5517
5518 // Modified by Robin Davies, October 2005
5519 // - Improvements to DirectX pointer chasing.
5520 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5521 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5522 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5523 // Changed device query structure for RtAudio 4.0.7, January 2010
5524
5525 #include <windows.h>
5526 #include <process.h>
5527 #include <mmsystem.h>
5528 #include <mmreg.h>
5529 #include <dsound.h>
5530 #include <assert.h>
5531 #include <algorithm>
5532
5533 #if defined(__MINGW32__)
5534 // missing from latest mingw winapi
5535 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5536 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5537 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5538 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5539 #endif
5540
5541 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5542
5543 #ifdef _MSC_VER // if Microsoft Visual C++
5544 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5545 #endif
5546
5547 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5548 {
5549 if ( pointer > bufferSize ) pointer -= bufferSize;
5550 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5551 if ( pointer < earlierPointer ) pointer += bufferSize;
5552 return pointer >= earlierPointer && pointer < laterPointer;
5553 }
5554
5555 // A structure to hold various information related to the DirectSound
5556 // API implementation.
5557 struct DsHandle {
5558 unsigned int drainCounter; // Tracks callback counts when draining
5559 bool internalDrain; // Indicates if stop is initiated from callback or not.
5560 void *id[2];
5561 void *buffer[2];
5562 bool xrun[2];
5563 UINT bufferPointer[2];
5564 DWORD dsBufferSize[2];
5565 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5566 HANDLE condition;
5567
5568 DsHandle()
5569 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5570 };
5571
5572 // Declarations for utility functions, callbacks, and structures
5573 // specific to the DirectSound implementation.
5574 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5575 LPCTSTR description,
5576 LPCTSTR module,
5577 LPVOID lpContext );
5578
5579 static const char* getErrorString( int code );
5580
5581 static unsigned __stdcall callbackHandler( void *ptr );
5582
5583 struct DsDevice {
5584 LPGUID id[2];
5585 bool validId[2];
5586 bool found;
5587 std::string name;
5588
5589 DsDevice()
5590 : found(false) { validId[0] = false; validId[1] = false; }
5591 };
5592
// Context passed to deviceQueryCallback() during device enumeration:
// selects which direction is being probed and where results accumulate.
struct DsProbeData {
  bool isInput; // true when enumerating capture devices, false for output
  std::vector<struct DsDevice>* dsDevices; // device list to be filled in by the callback
};
5597
5598 RtApiDs :: RtApiDs()
5599 {
5600 // Dsound will run both-threaded. If CoInitialize fails, then just
5601 // accept whatever the mainline chose for a threading model.
5602 coInitialized_ = false;
5603 HRESULT hr = CoInitialize( NULL );
5604 if ( !FAILED( hr ) ) coInitialized_ = true;
5605 }
5606
5607 RtApiDs :: ~RtApiDs()
5608 {
5609 if ( stream_.state != STREAM_CLOSED ) closeStream();
5610 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5611 }
5612
5613 // The DirectSound default output is always the first device.
5614 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5615 {
5616 return 0;
5617 }
5618
5619 // The DirectSound default input is always the first input device,
5620 // which is the first capture device enumerated.
5621 unsigned int RtApiDs :: getDefaultInputDevice( void )
5622 {
5623 return 0;
5624 }
5625
5626 unsigned int RtApiDs :: getDeviceCount( void )
5627 {
5628 // Set query flag for previously found devices to false, so that we
5629 // can check for any devices that have disappeared.
5630 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5631 dsDevices[i].found = false;
5632
5633 // Query DirectSound devices.
5634 struct DsProbeData probeInfo;
5635 probeInfo.isInput = false;
5636 probeInfo.dsDevices = &dsDevices;
5637 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5638 if ( FAILED( result ) ) {
5639 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5640 errorText_ = errorStream_.str();
5641 error( RtAudioError::WARNING );
5642 }
5643
5644 // Query DirectSoundCapture devices.
5645 probeInfo.isInput = true;
5646 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5647 if ( FAILED( result ) ) {
5648 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5649 errorText_ = errorStream_.str();
5650 error( RtAudioError::WARNING );
5651 }
5652
5653 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5654 for ( unsigned int i=0; i<dsDevices.size(); ) {
5655 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5656 else i++;
5657 }
5658
5659 return static_cast<unsigned int>(dsDevices.size());
5660 }
5661
// Probe the capabilities of one DirectSound device: output channels and
// sample rates first, then (falling through the 'probeInput' label)
// capture channels, formats, and rates. Failures along the way emit
// WARNING-level errors and return whatever was gathered so far, with
// info.probed left false unless the probe fully succeeded.
// NOTE: the goto/label structure is load-bearing — all declarations are
// positioned so that 'goto probeInput' never jumps over an initialization.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // Skip the output probe entirely if this device has no render identity.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  // Keep every table rate within the device's secondary-buffer range;
  // prefer the highest supported rate that does not exceed 48 kHz.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // If the device has no capture identity, the output probe alone is a
  // complete, successful result.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // Capture capabilities are reported via WAVE_FORMAT_* bit flags, with
  // separate stereo (xSxx) and mono (xMxx) flag sets.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Collect rates for the best available format (16-bit preferred).
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // The first enumerated capture device is the system default input.
  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5840
5841 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5842 unsigned int firstChannel, unsigned int sampleRate,
5843 RtAudioFormat format, unsigned int *bufferSize,
5844 RtAudio::StreamOptions *options )
5845 {
5846 if ( channels + firstChannel > 2 ) {
5847 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5848 return FAILURE;
5849 }
5850
5851 size_t nDevices = dsDevices.size();
5852 if ( nDevices == 0 ) {
5853 // This should not happen because a check is made before this function is called.
5854 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5855 return FAILURE;
5856 }
5857
5858 if ( device >= nDevices ) {
5859 // This should not happen because a check is made before this function is called.
5860 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5861 return FAILURE;
5862 }
5863
5864 if ( mode == OUTPUT ) {
5865 if ( dsDevices[ device ].validId[0] == false ) {
5866 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5867 errorText_ = errorStream_.str();
5868 return FAILURE;
5869 }
5870 }
5871 else { // mode == INPUT
5872 if ( dsDevices[ device ].validId[1] == false ) {
5873 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5874 errorText_ = errorStream_.str();
5875 return FAILURE;
5876 }
5877 }
5878
5879 // According to a note in PortAudio, using GetDesktopWindow()
5880 // instead of GetForegroundWindow() is supposed to avoid problems
5881 // that occur when the application's window is not the foreground
5882 // window. Also, if the application window closes before the
5883 // DirectSound buffer, DirectSound can crash. In the past, I had
5884 // problems when using GetDesktopWindow() but it seems fine now
5885 // (January 2010). I'll leave it commented here.
5886 // HWND hWnd = GetForegroundWindow();
5887 HWND hWnd = GetDesktopWindow();
5888
5889 // Check the numberOfBuffers parameter and limit the lowest value to
5890 // two. This is a judgement call and a value of two is probably too
5891 // low for capture, but it should work for playback.
5892 int nBuffers = 0;
5893 if ( options ) nBuffers = options->numberOfBuffers;
5894 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5895 if ( nBuffers < 2 ) nBuffers = 3;
5896
5897 // Check the lower range of the user-specified buffer size and set
5898 // (arbitrarily) to a lower bound of 32.
5899 if ( *bufferSize < 32 ) *bufferSize = 32;
5900
5901 // Create the wave format structure. The data format setting will
5902 // be determined later.
5903 WAVEFORMATEX waveFormat;
5904 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5905 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5906 waveFormat.nChannels = channels + firstChannel;
5907 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5908
5909 // Determine the device buffer size. By default, we'll use the value
5910 // defined above (32K), but we will grow it to make allowances for
5911 // very large software buffer sizes.
5912 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5913 DWORD dsPointerLeadTime = 0;
5914
5915 void *ohandle = 0, *bhandle = 0;
5916 HRESULT result;
5917 if ( mode == OUTPUT ) {
5918
5919 LPDIRECTSOUND output;
5920 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5921 if ( FAILED( result ) ) {
5922 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5923 errorText_ = errorStream_.str();
5924 return FAILURE;
5925 }
5926
5927 DSCAPS outCaps;
5928 outCaps.dwSize = sizeof( outCaps );
5929 result = output->GetCaps( &outCaps );
5930 if ( FAILED( result ) ) {
5931 output->Release();
5932 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5933 errorText_ = errorStream_.str();
5934 return FAILURE;
5935 }
5936
5937 // Check channel information.
5938 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5939 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5940 errorText_ = errorStream_.str();
5941 return FAILURE;
5942 }
5943
5944 // Check format information. Use 16-bit format unless not
5945 // supported or user requests 8-bit.
5946 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5947 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5948 waveFormat.wBitsPerSample = 16;
5949 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5950 }
5951 else {
5952 waveFormat.wBitsPerSample = 8;
5953 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5954 }
5955 stream_.userFormat = format;
5956
5957 // Update wave format structure and buffer information.
5958 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5959 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5960 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5961
5962 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5963 while ( dsPointerLeadTime * 2U > dsBufferSize )
5964 dsBufferSize *= 2;
5965
5966 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5967 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5968 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5969 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5970 if ( FAILED( result ) ) {
5971 output->Release();
5972 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5973 errorText_ = errorStream_.str();
5974 return FAILURE;
5975 }
5976
5977 // Even though we will write to the secondary buffer, we need to
5978 // access the primary buffer to set the correct output format
5979 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5980 // buffer description.
5981 DSBUFFERDESC bufferDescription;
5982 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5983 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5984 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5985
5986 // Obtain the primary buffer
5987 LPDIRECTSOUNDBUFFER buffer;
5988 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5989 if ( FAILED( result ) ) {
5990 output->Release();
5991 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5992 errorText_ = errorStream_.str();
5993 return FAILURE;
5994 }
5995
5996 // Set the primary DS buffer sound format.
5997 result = buffer->SetFormat( &waveFormat );
5998 if ( FAILED( result ) ) {
5999 output->Release();
6000 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6001 errorText_ = errorStream_.str();
6002 return FAILURE;
6003 }
6004
6005 // Setup the secondary DS buffer description.
6006 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6007 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6008 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6009 DSBCAPS_GLOBALFOCUS |
6010 DSBCAPS_GETCURRENTPOSITION2 |
6011 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6012 bufferDescription.dwBufferBytes = dsBufferSize;
6013 bufferDescription.lpwfxFormat = &waveFormat;
6014
6015 // Try to create the secondary DS buffer. If that doesn't work,
6016 // try to use software mixing. Otherwise, there's a problem.
6017 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6018 if ( FAILED( result ) ) {
6019 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6020 DSBCAPS_GLOBALFOCUS |
6021 DSBCAPS_GETCURRENTPOSITION2 |
6022 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6023 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6024 if ( FAILED( result ) ) {
6025 output->Release();
6026 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6027 errorText_ = errorStream_.str();
6028 return FAILURE;
6029 }
6030 }
6031
6032 // Get the buffer size ... might be different from what we specified.
6033 DSBCAPS dsbcaps;
6034 dsbcaps.dwSize = sizeof( DSBCAPS );
6035 result = buffer->GetCaps( &dsbcaps );
6036 if ( FAILED( result ) ) {
6037 output->Release();
6038 buffer->Release();
6039 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6040 errorText_ = errorStream_.str();
6041 return FAILURE;
6042 }
6043
6044 dsBufferSize = dsbcaps.dwBufferBytes;
6045
6046 // Lock the DS buffer
6047 LPVOID audioPtr;
6048 DWORD dataLen;
6049 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6050 if ( FAILED( result ) ) {
6051 output->Release();
6052 buffer->Release();
6053 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6054 errorText_ = errorStream_.str();
6055 return FAILURE;
6056 }
6057
6058 // Zero the DS buffer
6059 ZeroMemory( audioPtr, dataLen );
6060
6061 // Unlock the DS buffer
6062 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6063 if ( FAILED( result ) ) {
6064 output->Release();
6065 buffer->Release();
6066 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6067 errorText_ = errorStream_.str();
6068 return FAILURE;
6069 }
6070
6071 ohandle = (void *) output;
6072 bhandle = (void *) buffer;
6073 }
6074
6075 if ( mode == INPUT ) {
6076
6077 LPDIRECTSOUNDCAPTURE input;
6078 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6079 if ( FAILED( result ) ) {
6080 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6081 errorText_ = errorStream_.str();
6082 return FAILURE;
6083 }
6084
6085 DSCCAPS inCaps;
6086 inCaps.dwSize = sizeof( inCaps );
6087 result = input->GetCaps( &inCaps );
6088 if ( FAILED( result ) ) {
6089 input->Release();
6090 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6091 errorText_ = errorStream_.str();
6092 return FAILURE;
6093 }
6094
6095 // Check channel information.
6096 if ( inCaps.dwChannels < channels + firstChannel ) {
6097 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6098 return FAILURE;
6099 }
6100
6101 // Check format information. Use 16-bit format unless user
6102 // requests 8-bit.
6103 DWORD deviceFormats;
6104 if ( channels + firstChannel == 2 ) {
6105 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6106 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6107 waveFormat.wBitsPerSample = 8;
6108 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6109 }
6110 else { // assume 16-bit is supported
6111 waveFormat.wBitsPerSample = 16;
6112 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6113 }
6114 }
6115 else { // channel == 1
6116 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6117 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6118 waveFormat.wBitsPerSample = 8;
6119 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6120 }
6121 else { // assume 16-bit is supported
6122 waveFormat.wBitsPerSample = 16;
6123 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6124 }
6125 }
6126 stream_.userFormat = format;
6127
6128 // Update wave format structure and buffer information.
6129 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6130 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6131 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6132
6133 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6134 while ( dsPointerLeadTime * 2U > dsBufferSize )
6135 dsBufferSize *= 2;
6136
6137 // Setup the secondary DS buffer description.
6138 DSCBUFFERDESC bufferDescription;
6139 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6140 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6141 bufferDescription.dwFlags = 0;
6142 bufferDescription.dwReserved = 0;
6143 bufferDescription.dwBufferBytes = dsBufferSize;
6144 bufferDescription.lpwfxFormat = &waveFormat;
6145
6146 // Create the capture buffer.
6147 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6148 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6149 if ( FAILED( result ) ) {
6150 input->Release();
6151 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6152 errorText_ = errorStream_.str();
6153 return FAILURE;
6154 }
6155
6156 // Get the buffer size ... might be different from what we specified.
6157 DSCBCAPS dscbcaps;
6158 dscbcaps.dwSize = sizeof( DSCBCAPS );
6159 result = buffer->GetCaps( &dscbcaps );
6160 if ( FAILED( result ) ) {
6161 input->Release();
6162 buffer->Release();
6163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6164 errorText_ = errorStream_.str();
6165 return FAILURE;
6166 }
6167
6168 dsBufferSize = dscbcaps.dwBufferBytes;
6169
6170 // NOTE: We could have a problem here if this is a duplex stream
6171 // and the play and capture hardware buffer sizes are different
6172 // (I'm actually not sure if that is a problem or not).
6173 // Currently, we are not verifying that.
6174
6175 // Lock the capture buffer
6176 LPVOID audioPtr;
6177 DWORD dataLen;
6178 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6179 if ( FAILED( result ) ) {
6180 input->Release();
6181 buffer->Release();
6182 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6183 errorText_ = errorStream_.str();
6184 return FAILURE;
6185 }
6186
6187 // Zero the buffer
6188 ZeroMemory( audioPtr, dataLen );
6189
6190 // Unlock the buffer
6191 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6192 if ( FAILED( result ) ) {
6193 input->Release();
6194 buffer->Release();
6195 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6196 errorText_ = errorStream_.str();
6197 return FAILURE;
6198 }
6199
6200 ohandle = (void *) input;
6201 bhandle = (void *) buffer;
6202 }
6203
6204 // Set various stream parameters
6205 DsHandle *handle = 0;
6206 stream_.nDeviceChannels[mode] = channels + firstChannel;
6207 stream_.nUserChannels[mode] = channels;
6208 stream_.bufferSize = *bufferSize;
6209 stream_.channelOffset[mode] = firstChannel;
6210 stream_.deviceInterleaved[mode] = true;
6211 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6212 else stream_.userInterleaved = true;
6213
6214 // Set flag for buffer conversion
6215 stream_.doConvertBuffer[mode] = false;
6216 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6217 stream_.doConvertBuffer[mode] = true;
6218 if (stream_.userFormat != stream_.deviceFormat[mode])
6219 stream_.doConvertBuffer[mode] = true;
6220 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6221 stream_.nUserChannels[mode] > 1 )
6222 stream_.doConvertBuffer[mode] = true;
6223
6224 // Allocate necessary internal buffers
6225 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6226 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6227 if ( stream_.userBuffer[mode] == NULL ) {
6228 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6229 goto error;
6230 }
6231
6232 if ( stream_.doConvertBuffer[mode] ) {
6233
6234 bool makeBuffer = true;
6235 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6236 if ( mode == INPUT ) {
6237 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6238 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6239 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6240 }
6241 }
6242
6243 if ( makeBuffer ) {
6244 bufferBytes *= *bufferSize;
6245 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6246 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6247 if ( stream_.deviceBuffer == NULL ) {
6248 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6249 goto error;
6250 }
6251 }
6252 }
6253
6254 // Allocate our DsHandle structures for the stream.
6255 if ( stream_.apiHandle == 0 ) {
6256 try {
6257 handle = new DsHandle;
6258 }
6259 catch ( std::bad_alloc& ) {
6260 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6261 goto error;
6262 }
6263
6264 // Create a manual-reset event.
6265 handle->condition = CreateEvent( NULL, // no security
6266 TRUE, // manual-reset
6267 FALSE, // non-signaled initially
6268 NULL ); // unnamed
6269 stream_.apiHandle = (void *) handle;
6270 }
6271 else
6272 handle = (DsHandle *) stream_.apiHandle;
6273 handle->id[mode] = ohandle;
6274 handle->buffer[mode] = bhandle;
6275 handle->dsBufferSize[mode] = dsBufferSize;
6276 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6277
6278 stream_.device[mode] = device;
6279 stream_.state = STREAM_STOPPED;
6280 if ( stream_.mode == OUTPUT && mode == INPUT )
6281 // We had already set up an output stream.
6282 stream_.mode = DUPLEX;
6283 else
6284 stream_.mode = mode;
6285 stream_.nBuffers = nBuffers;
6286 stream_.sampleRate = sampleRate;
6287
6288 // Setup the buffer conversion information structure.
6289 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6290
6291 // Setup the callback thread.
6292 if ( stream_.callbackInfo.isRunning == false ) {
6293 unsigned threadId;
6294 stream_.callbackInfo.isRunning = true;
6295 stream_.callbackInfo.object = (void *) this;
6296 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6297 &stream_.callbackInfo, 0, &threadId );
6298 if ( stream_.callbackInfo.thread == 0 ) {
6299 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6300 goto error;
6301 }
6302
6303 // Boost DS thread priority
6304 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6305 }
6306 return SUCCESS;
6307
6308 error:
6309 if ( handle ) {
6310 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6311 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6312 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6313 if ( buffer ) buffer->Release();
6314 object->Release();
6315 }
6316 if ( handle->buffer[1] ) {
6317 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6318 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6319 if ( buffer ) buffer->Release();
6320 object->Release();
6321 }
6322 CloseHandle( handle->condition );
6323 delete handle;
6324 stream_.apiHandle = 0;
6325 }
6326
6327 for ( int i=0; i<2; i++ ) {
6328 if ( stream_.userBuffer[i] ) {
6329 free( stream_.userBuffer[i] );
6330 stream_.userBuffer[i] = 0;
6331 }
6332 }
6333
6334 if ( stream_.deviceBuffer ) {
6335 free( stream_.deviceBuffer );
6336 stream_.deviceBuffer = 0;
6337 }
6338
6339 stream_.state = STREAM_CLOSED;
6340 return FAILURE;
6341 }
6342
6343 void RtApiDs :: closeStream()
6344 {
6345 if ( stream_.state == STREAM_CLOSED ) {
6346 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6347 error( RtAudioError::WARNING );
6348 return;
6349 }
6350
6351 // Stop the callback thread.
6352 stream_.callbackInfo.isRunning = false;
6353 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6354 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6355
6356 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6357 if ( handle ) {
6358 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6359 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6360 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6361 if ( buffer ) {
6362 buffer->Stop();
6363 buffer->Release();
6364 }
6365 object->Release();
6366 }
6367 if ( handle->buffer[1] ) {
6368 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6369 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6370 if ( buffer ) {
6371 buffer->Stop();
6372 buffer->Release();
6373 }
6374 object->Release();
6375 }
6376 CloseHandle( handle->condition );
6377 delete handle;
6378 stream_.apiHandle = 0;
6379 }
6380
6381 for ( int i=0; i<2; i++ ) {
6382 if ( stream_.userBuffer[i] ) {
6383 free( stream_.userBuffer[i] );
6384 stream_.userBuffer[i] = 0;
6385 }
6386 }
6387
6388 if ( stream_.deviceBuffer ) {
6389 free( stream_.deviceBuffer );
6390 stream_.deviceBuffer = 0;
6391 }
6392
6393 stream_.mode = UNINITIALIZED;
6394 stream_.state = STREAM_CLOSED;
6395 }
6396
6397 void RtApiDs :: startStream()
6398 {
6399 verifyStream();
6400 if ( stream_.state == STREAM_RUNNING ) {
6401 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6402 error( RtAudioError::WARNING );
6403 return;
6404 }
6405
6406 #if defined( HAVE_GETTIMEOFDAY )
6407 gettimeofday( &stream_.lastTickTimestamp, NULL );
6408 #endif
6409
6410 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6411
6412 // Increase scheduler frequency on lesser windows (a side-effect of
6413 // increasing timer accuracy). On greater windows (Win2K or later),
6414 // this is already in effect.
6415 timeBeginPeriod( 1 );
6416
6417 buffersRolling = false;
6418 duplexPrerollBytes = 0;
6419
6420 if ( stream_.mode == DUPLEX ) {
6421 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6422 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6423 }
6424
6425 HRESULT result = 0;
6426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6427
6428 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6429 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6430 if ( FAILED( result ) ) {
6431 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6432 errorText_ = errorStream_.str();
6433 goto unlock;
6434 }
6435 }
6436
6437 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6438
6439 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6440 result = buffer->Start( DSCBSTART_LOOPING );
6441 if ( FAILED( result ) ) {
6442 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6443 errorText_ = errorStream_.str();
6444 goto unlock;
6445 }
6446 }
6447
6448 handle->drainCounter = 0;
6449 handle->internalDrain = false;
6450 ResetEvent( handle->condition );
6451 stream_.state = STREAM_RUNNING;
6452
6453 unlock:
6454 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6455 }
6456
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // If no drain is in progress, request one (drainCounter = 2 means
    // "write zeros, don't invoke the user callback") and wait for the
    // callback thread to signal that queued output has played out.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    // Serialize against callbackEvent(), which also takes this mutex.
    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already taken by the OUTPUT branch
    // above; lock here only for a pure INPUT stream.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  // Reached on success and on every error path above; the mutex is held
  // in all of those cases, so it is always released exactly once here.
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6560
6561 void RtApiDs :: abortStream()
6562 {
6563 verifyStream();
6564 if ( stream_.state == STREAM_STOPPED ) {
6565 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6566 error( RtAudioError::WARNING );
6567 return;
6568 }
6569
6570 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6571 handle->drainCounter = 2;
6572
6573 stopStream();
6574 }
6575
6576 void RtApiDs :: callbackEvent()
6577 {
6578 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6579 Sleep( 50 ); // sleep 50 milliseconds
6580 return;
6581 }
6582
6583 if ( stream_.state == STREAM_CLOSED ) {
6584 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6585 error( RtAudioError::WARNING );
6586 return;
6587 }
6588
6589 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6590 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6591
6592 // Check if we were draining the stream and signal is finished.
6593 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6594
6595 stream_.state = STREAM_STOPPING;
6596 if ( handle->internalDrain == false )
6597 SetEvent( handle->condition );
6598 else
6599 stopStream();
6600 return;
6601 }
6602
6603 // Invoke user callback to get fresh output data UNLESS we are
6604 // draining stream.
6605 if ( handle->drainCounter == 0 ) {
6606 RtAudioCallback callback = (RtAudioCallback) info->callback;
6607 double streamTime = getStreamTime();
6608 RtAudioStreamStatus status = 0;
6609 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6610 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6611 handle->xrun[0] = false;
6612 }
6613 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6614 status |= RTAUDIO_INPUT_OVERFLOW;
6615 handle->xrun[1] = false;
6616 }
6617 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6618 stream_.bufferSize, streamTime, status, info->userData );
6619 if ( cbReturnValue == 2 ) {
6620 stream_.state = STREAM_STOPPING;
6621 handle->drainCounter = 2;
6622 abortStream();
6623 return;
6624 }
6625 else if ( cbReturnValue == 1 ) {
6626 handle->drainCounter = 1;
6627 handle->internalDrain = true;
6628 }
6629 }
6630
6631 HRESULT result;
6632 DWORD currentWritePointer, safeWritePointer;
6633 DWORD currentReadPointer, safeReadPointer;
6634 UINT nextWritePointer;
6635
6636 LPVOID buffer1 = NULL;
6637 LPVOID buffer2 = NULL;
6638 DWORD bufferSize1 = 0;
6639 DWORD bufferSize2 = 0;
6640
6641 char *buffer;
6642 long bufferBytes;
6643
6644 MUTEX_LOCK( &stream_.mutex );
6645 if ( stream_.state == STREAM_STOPPED ) {
6646 MUTEX_UNLOCK( &stream_.mutex );
6647 return;
6648 }
6649
6650 if ( buffersRolling == false ) {
6651 if ( stream_.mode == DUPLEX ) {
6652 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6653
6654 // It takes a while for the devices to get rolling. As a result,
6655 // there's no guarantee that the capture and write device pointers
6656 // will move in lockstep. Wait here for both devices to start
6657 // rolling, and then set our buffer pointers accordingly.
6658 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6659 // bytes later than the write buffer.
6660
6661 // Stub: a serious risk of having a pre-emptive scheduling round
6662 // take place between the two GetCurrentPosition calls... but I'm
6663 // really not sure how to solve the problem. Temporarily boost to
6664 // Realtime priority, maybe; but I'm not sure what priority the
6665 // DirectSound service threads run at. We *should* be roughly
6666 // within a ms or so of correct.
6667
6668 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6669 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6670
6671 DWORD startSafeWritePointer, startSafeReadPointer;
6672
6673 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6674 if ( FAILED( result ) ) {
6675 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6676 errorText_ = errorStream_.str();
6677 MUTEX_UNLOCK( &stream_.mutex );
6678 error( RtAudioError::SYSTEM_ERROR );
6679 return;
6680 }
6681 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6682 if ( FAILED( result ) ) {
6683 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6684 errorText_ = errorStream_.str();
6685 MUTEX_UNLOCK( &stream_.mutex );
6686 error( RtAudioError::SYSTEM_ERROR );
6687 return;
6688 }
6689 while ( true ) {
6690 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6691 if ( FAILED( result ) ) {
6692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6693 errorText_ = errorStream_.str();
6694 MUTEX_UNLOCK( &stream_.mutex );
6695 error( RtAudioError::SYSTEM_ERROR );
6696 return;
6697 }
6698 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6699 if ( FAILED( result ) ) {
6700 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6701 errorText_ = errorStream_.str();
6702 MUTEX_UNLOCK( &stream_.mutex );
6703 error( RtAudioError::SYSTEM_ERROR );
6704 return;
6705 }
6706 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6707 Sleep( 1 );
6708 }
6709
6710 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6711
6712 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6713 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6714 handle->bufferPointer[1] = safeReadPointer;
6715 }
6716 else if ( stream_.mode == OUTPUT ) {
6717
6718 // Set the proper nextWritePosition after initial startup.
6719 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6720 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6721 if ( FAILED( result ) ) {
6722 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6723 errorText_ = errorStream_.str();
6724 MUTEX_UNLOCK( &stream_.mutex );
6725 error( RtAudioError::SYSTEM_ERROR );
6726 return;
6727 }
6728 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6729 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6730 }
6731
6732 buffersRolling = true;
6733 }
6734
6735 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6736
6737 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6738
6739 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6740 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6741 bufferBytes *= formatBytes( stream_.userFormat );
6742 memset( stream_.userBuffer[0], 0, bufferBytes );
6743 }
6744
6745 // Setup parameters and do buffer conversion if necessary.
6746 if ( stream_.doConvertBuffer[0] ) {
6747 buffer = stream_.deviceBuffer;
6748 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6749 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6750 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6751 }
6752 else {
6753 buffer = stream_.userBuffer[0];
6754 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6755 bufferBytes *= formatBytes( stream_.userFormat );
6756 }
6757
6758 // No byte swapping necessary in DirectSound implementation.
6759
6760 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6761 // unsigned. So, we need to convert our signed 8-bit data here to
6762 // unsigned.
6763 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6764 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6765
6766 DWORD dsBufferSize = handle->dsBufferSize[0];
6767 nextWritePointer = handle->bufferPointer[0];
6768
6769 DWORD endWrite, leadPointer;
6770 while ( true ) {
6771 // Find out where the read and "safe write" pointers are.
6772 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6773 if ( FAILED( result ) ) {
6774 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6775 errorText_ = errorStream_.str();
6776 MUTEX_UNLOCK( &stream_.mutex );
6777 error( RtAudioError::SYSTEM_ERROR );
6778 return;
6779 }
6780
6781 // We will copy our output buffer into the region between
6782 // safeWritePointer and leadPointer. If leadPointer is not
6783 // beyond the next endWrite position, wait until it is.
6784 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6785 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6786 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6787 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6788 endWrite = nextWritePointer + bufferBytes;
6789
6790 // Check whether the entire write region is behind the play pointer.
6791 if ( leadPointer >= endWrite ) break;
6792
6793 // If we are here, then we must wait until the leadPointer advances
6794 // beyond the end of our next write region. We use the
6795 // Sleep() function to suspend operation until that happens.
6796 double millis = ( endWrite - leadPointer ) * 1000.0;
6797 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6798 if ( millis < 1.0 ) millis = 1.0;
6799 Sleep( (DWORD) millis );
6800 }
6801
6802 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6803 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6804 // We've strayed into the forbidden zone ... resync the read pointer.
6805 handle->xrun[0] = true;
6806 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6807 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6808 handle->bufferPointer[0] = nextWritePointer;
6809 endWrite = nextWritePointer + bufferBytes;
6810 }
6811
6812 // Lock free space in the buffer
6813 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6814 &bufferSize1, &buffer2, &bufferSize2, 0 );
6815 if ( FAILED( result ) ) {
6816 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6817 errorText_ = errorStream_.str();
6818 MUTEX_UNLOCK( &stream_.mutex );
6819 error( RtAudioError::SYSTEM_ERROR );
6820 return;
6821 }
6822
6823 // Copy our buffer into the DS buffer
6824 CopyMemory( buffer1, buffer, bufferSize1 );
6825 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6826
6827 // Update our buffer offset and unlock sound buffer
6828 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6829 if ( FAILED( result ) ) {
6830 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6831 errorText_ = errorStream_.str();
6832 MUTEX_UNLOCK( &stream_.mutex );
6833 error( RtAudioError::SYSTEM_ERROR );
6834 return;
6835 }
6836 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6837 handle->bufferPointer[0] = nextWritePointer;
6838 }
6839
6840 // Don't bother draining input
6841 if ( handle->drainCounter ) {
6842 handle->drainCounter++;
6843 goto unlock;
6844 }
6845
6846 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6847
6848 // Setup parameters.
6849 if ( stream_.doConvertBuffer[1] ) {
6850 buffer = stream_.deviceBuffer;
6851 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6852 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6853 }
6854 else {
6855 buffer = stream_.userBuffer[1];
6856 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6857 bufferBytes *= formatBytes( stream_.userFormat );
6858 }
6859
6860 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6861 long nextReadPointer = handle->bufferPointer[1];
6862 DWORD dsBufferSize = handle->dsBufferSize[1];
6863
6864 // Find out where the write and "safe read" pointers are.
6865 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6866 if ( FAILED( result ) ) {
6867 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6868 errorText_ = errorStream_.str();
6869 MUTEX_UNLOCK( &stream_.mutex );
6870 error( RtAudioError::SYSTEM_ERROR );
6871 return;
6872 }
6873
6874 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6875 DWORD endRead = nextReadPointer + bufferBytes;
6876
6877 // Handling depends on whether we are INPUT or DUPLEX.
6878 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6879 // then a wait here will drag the write pointers into the forbidden zone.
6880 //
6881 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6882 // it's in a safe position. This causes dropouts, but it seems to be the only
6883 // practical way to sync up the read and write pointers reliably, given the
6884 // the very complex relationship between phase and increment of the read and write
6885 // pointers.
6886 //
6887 // In order to minimize audible dropouts in DUPLEX mode, we will
6888 // provide a pre-roll period of 0.5 seconds in which we return
6889 // zeros from the read buffer while the pointers sync up.
6890
6891 if ( stream_.mode == DUPLEX ) {
6892 if ( safeReadPointer < endRead ) {
6893 if ( duplexPrerollBytes <= 0 ) {
6894 // Pre-roll time over. Be more agressive.
6895 int adjustment = endRead-safeReadPointer;
6896
6897 handle->xrun[1] = true;
6898 // Two cases:
6899 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6900 // and perform fine adjustments later.
6901 // - small adjustments: back off by twice as much.
6902 if ( adjustment >= 2*bufferBytes )
6903 nextReadPointer = safeReadPointer-2*bufferBytes;
6904 else
6905 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6906
6907 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6908
6909 }
6910 else {
6911 // In pre=roll time. Just do it.
6912 nextReadPointer = safeReadPointer - bufferBytes;
6913 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6914 }
6915 endRead = nextReadPointer + bufferBytes;
6916 }
6917 }
6918 else { // mode == INPUT
6919 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6920 // See comments for playback.
6921 double millis = (endRead - safeReadPointer) * 1000.0;
6922 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6923 if ( millis < 1.0 ) millis = 1.0;
6924 Sleep( (DWORD) millis );
6925
6926 // Wake up and find out where we are now.
6927 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6928 if ( FAILED( result ) ) {
6929 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6930 errorText_ = errorStream_.str();
6931 MUTEX_UNLOCK( &stream_.mutex );
6932 error( RtAudioError::SYSTEM_ERROR );
6933 return;
6934 }
6935
6936 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6937 }
6938 }
6939
6940 // Lock free space in the buffer
6941 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6942 &bufferSize1, &buffer2, &bufferSize2, 0 );
6943 if ( FAILED( result ) ) {
6944 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6945 errorText_ = errorStream_.str();
6946 MUTEX_UNLOCK( &stream_.mutex );
6947 error( RtAudioError::SYSTEM_ERROR );
6948 return;
6949 }
6950
6951 if ( duplexPrerollBytes <= 0 ) {
6952 // Copy our buffer into the DS buffer
6953 CopyMemory( buffer, buffer1, bufferSize1 );
6954 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6955 }
6956 else {
6957 memset( buffer, 0, bufferSize1 );
6958 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6959 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6960 }
6961
6962 // Update our buffer offset and unlock sound buffer
6963 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6964 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6965 if ( FAILED( result ) ) {
6966 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6967 errorText_ = errorStream_.str();
6968 MUTEX_UNLOCK( &stream_.mutex );
6969 error( RtAudioError::SYSTEM_ERROR );
6970 return;
6971 }
6972 handle->bufferPointer[1] = nextReadPointer;
6973
6974 // No byte swapping necessary in DirectSound implementation.
6975
6976 // If necessary, convert 8-bit data from unsigned to signed.
6977 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6978 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6979
6980 // Do buffer conversion if necessary.
6981 if ( stream_.doConvertBuffer[1] )
6982 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6983 }
6984
6985 unlock:
6986 MUTEX_UNLOCK( &stream_.mutex );
6987 RtApi::tickStreamTime();
6988 }
6989
6990 // Definitions for utility functions and callbacks
6991 // specific to the DirectSound implementation.
6992
6993 static unsigned __stdcall callbackHandler( void *ptr )
6994 {
6995 CallbackInfo *info = (CallbackInfo *) ptr;
6996 RtApiDs *object = (RtApiDs *) info->object;
6997 bool* isRunning = &info->isRunning;
6998
6999 while ( *isRunning == true ) {
7000 object->callbackEvent();
7001 }
7002
7003 _endthreadex( 0 );
7004 return 0;
7005 }
7006
7007 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7008 LPCTSTR description,
7009 LPCTSTR /*module*/,
7010 LPVOID lpContext )
7011 {
7012 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7013 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7014
7015 HRESULT hr;
7016 bool validDevice = false;
7017 if ( probeInfo.isInput == true ) {
7018 DSCCAPS caps;
7019 LPDIRECTSOUNDCAPTURE object;
7020
7021 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7022 if ( hr != DS_OK ) return TRUE;
7023
7024 caps.dwSize = sizeof(caps);
7025 hr = object->GetCaps( &caps );
7026 if ( hr == DS_OK ) {
7027 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7028 validDevice = true;
7029 }
7030 object->Release();
7031 }
7032 else {
7033 DSCAPS caps;
7034 LPDIRECTSOUND object;
7035 hr = DirectSoundCreate( lpguid, &object, NULL );
7036 if ( hr != DS_OK ) return TRUE;
7037
7038 caps.dwSize = sizeof(caps);
7039 hr = object->GetCaps( &caps );
7040 if ( hr == DS_OK ) {
7041 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7042 validDevice = true;
7043 }
7044 object->Release();
7045 }
7046
7047 // If good device, then save its name and guid.
7048 std::string name = convertCharPointerToStdString( description );
7049 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7050 if ( lpguid == NULL )
7051 name = "Default Device";
7052 if ( validDevice ) {
7053 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7054 if ( dsDevices[i].name == name ) {
7055 dsDevices[i].found = true;
7056 if ( probeInfo.isInput ) {
7057 dsDevices[i].id[1] = lpguid;
7058 dsDevices[i].validId[1] = true;
7059 }
7060 else {
7061 dsDevices[i].id[0] = lpguid;
7062 dsDevices[i].validId[0] = true;
7063 }
7064 return TRUE;
7065 }
7066 }
7067
7068 DsDevice device;
7069 device.name = name;
7070 device.found = true;
7071 if ( probeInfo.isInput ) {
7072 device.id[1] = lpguid;
7073 device.validId[1] = true;
7074 }
7075 else {
7076 device.id[0] = lpguid;
7077 device.validId[0] = true;
7078 }
7079 dsDevices.push_back( device );
7080 }
7081
7082 return TRUE;
7083 }
7084
7085 static const char* getErrorString( int code )
7086 {
7087 switch ( code ) {
7088
7089 case DSERR_ALLOCATED:
7090 return "Already allocated";
7091
7092 case DSERR_CONTROLUNAVAIL:
7093 return "Control unavailable";
7094
7095 case DSERR_INVALIDPARAM:
7096 return "Invalid parameter";
7097
7098 case DSERR_INVALIDCALL:
7099 return "Invalid call";
7100
7101 case DSERR_GENERIC:
7102 return "Generic error";
7103
7104 case DSERR_PRIOLEVELNEEDED:
7105 return "Priority level needed";
7106
7107 case DSERR_OUTOFMEMORY:
7108 return "Out of memory";
7109
7110 case DSERR_BADFORMAT:
7111 return "The sample rate or the channel format is not supported";
7112
7113 case DSERR_UNSUPPORTED:
7114 return "Not supported";
7115
7116 case DSERR_NODRIVER:
7117 return "No driver";
7118
7119 case DSERR_ALREADYINITIALIZED:
7120 return "Already initialized";
7121
7122 case DSERR_NOAGGREGATION:
7123 return "No aggregation";
7124
7125 case DSERR_BUFFERLOST:
7126 return "Buffer lost";
7127
7128 case DSERR_OTHERAPPHASPRIO:
7129 return "Another application already has priority";
7130
7131 case DSERR_UNINITIALIZED:
7132 return "Uninitialized";
7133
7134 default:
7135 return "DirectSound unknown error";
7136 }
7137 }
7138 //******************** End of __WINDOWS_DS__ *********************//
7139 #endif
7140
7141
7142 #if defined(__LINUX_ALSA__)
7143
7144 #include <alsa/asoundlib.h>
7145 #include <unistd.h>
7146
7147 // A structure to hold various information related to the ALSA API
7148 // implementation.
7149 struct AlsaHandle {
7150 snd_pcm_t *handles[2];
7151 bool synchronized;
7152 bool xrun[2];
7153 pthread_cond_t runnable_cv;
7154 bool runnable;
7155
7156 AlsaHandle()
7157 #if _cplusplus >= 201103L
7158 :handles{nullptr, nullptr}, synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7159 #else
7160 : synchronized(false), runnable(false) { handles[0] = NULL; handles[1] = NULL; xrun[0] = false; xrun[1] = false; }
7161 #endif
7162 };
7163
7164 static void *alsaCallbackHandler( void * ptr );
7165
// Default constructor: no ALSA resources are acquired here; PCM handles
// and stream state are set up later by probeDeviceOpen().
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
7170
7171 RtApiAlsa :: ~RtApiAlsa()
7172 {
7173 if ( stream_.state != STREAM_CLOSED ) closeStream();
7174 }
7175
7176 unsigned int RtApiAlsa :: getDeviceCount( void )
7177 {
7178 unsigned nDevices = 0;
7179 int result, subdevice, card;
7180 char name[64];
7181 snd_ctl_t *handle = 0;
7182
7183 strcpy(name, "default");
7184 result = snd_ctl_open( &handle, "default", 0 );
7185 if (result == 0) {
7186 nDevices++;
7187 snd_ctl_close( handle );
7188 }
7189
7190 // Count cards and devices
7191 card = -1;
7192 snd_card_next( &card );
7193 while ( card >= 0 ) {
7194 sprintf( name, "hw:%d", card );
7195 result = snd_ctl_open( &handle, name, 0 );
7196 if ( result < 0 ) {
7197 handle = 0;
7198 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7199 errorText_ = errorStream_.str();
7200 error( RtAudioError::WARNING );
7201 goto nextcard;
7202 }
7203 subdevice = -1;
7204 while( 1 ) {
7205 result = snd_ctl_pcm_next_device( handle, &subdevice );
7206 if ( result < 0 ) {
7207 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7208 errorText_ = errorStream_.str();
7209 error( RtAudioError::WARNING );
7210 break;
7211 }
7212 if ( subdevice < 0 )
7213 break;
7214 nDevices++;
7215 }
7216 nextcard:
7217 if ( handle )
7218 snd_ctl_close( handle );
7219 snd_card_next( &card );
7220 }
7221
7222 return nDevices;
7223 }
7224
// Probe a single ALSA device (by RtAudio device index) and return its
// capabilities: channel counts, supported sample rates, native formats
// and a display name.  On failure, `info.probed` stays false and a
// WARNING (or INVALID_USE for a bad index) is raised.
//
// NOTE(review): control flow is goto-based; the lifetime of `chandle`
// is threaded through the gotos (it is open on entry to foundDevice and
// closed on every path through the playback/capture probes) — preserve
// this ordering when modifying.
RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  unsigned nDevices = 0;
  int result=-1, subdevice=-1, card=-1;
  char name[64];
  snd_ctl_t *chandle = 0;

  // Device 0 is the "default" virtual device when it can be opened.
  result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
  if ( result == 0 ) {
    if ( nDevices++ == device ) {
      strcpy( name, "default" );
      // chandle is intentionally left open here; it is closed later in
      // the playback/capture probe paths below.
      goto foundDevice;
    }
  }
  if ( chandle )
    snd_ctl_close( chandle );

  // Count cards and devices
  snd_card_next( &card );
  while ( card >= 0 ) {
    sprintf( name, "hw:%d", card );
    result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
    if ( result < 0 ) {
      chandle = 0;  // avoid closing an invalid handle at nextcard
      errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
      goto nextcard;
    }
    subdevice = -1;
    while( 1 ) {
      result = snd_ctl_pcm_next_device( chandle, &subdevice );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        error( RtAudioError::WARNING );
        break;
      }
      if ( subdevice < 0 ) break;
      if ( nDevices == device ) {
        sprintf( name, "hw:%d,%d", card, subdevice );
        goto foundDevice;
      }
      nDevices++;
    }
  nextcard:
    if ( chandle )
      snd_ctl_close( chandle );
    snd_card_next( &card );
  }

  if ( nDevices == 0 ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

 foundDevice:

  // If a stream is already open, we cannot probe the stream devices.
  // Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED &&
       ( stream_.device[0] == device || stream_.device[1] == device ) ) {
    snd_ctl_close( chandle );
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  int openMode = SND_PCM_ASYNC;
  snd_pcm_stream_t stream;
  snd_pcm_info_t *pcminfo;
  snd_pcm_info_alloca( &pcminfo );  // stack allocation: no free needed
  snd_pcm_t *phandle;
  snd_pcm_hw_params_t *params;
  snd_pcm_hw_params_alloca( &params );  // stack allocation: no free needed

  // First try for playback unless default device (which has subdev -1)
  stream = SND_PCM_STREAM_PLAYBACK;
  snd_pcm_info_set_stream( pcminfo, stream );
  if ( subdevice != -1 ) {
    snd_pcm_info_set_device( pcminfo, subdevice );
    snd_pcm_info_set_subdevice( pcminfo, 0 );

    result = snd_ctl_pcm_info( chandle, pcminfo );
    if ( result < 0 ) {
      // Device probably doesn't support playback.
      goto captureProbe;
    }
  }

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }

  // Get output channel information.
  unsigned int value;
  result = snd_pcm_hw_params_get_channels_max( params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto captureProbe;
  }
  info.outputChannels = value;
  snd_pcm_close( phandle );

 captureProbe:
  stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  // Now try for capture unless default device (with subdev = -1)
  if ( subdevice != -1 ) {
    result = snd_ctl_pcm_info( chandle, pcminfo );
    snd_ctl_close( chandle );  // control handle no longer needed past here
    if ( result < 0 ) {
      // Device probably doesn't support capture.
      if ( info.outputChannels == 0 ) return info;
      goto probeParameters;
    }
  }
  else
    snd_ctl_close( chandle );

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }

  result = snd_pcm_hw_params_get_channels_max( params, &value );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  }
  info.inputChannels = value;
  snd_pcm_close( phandle );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // ALSA doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

 probeParameters:
  // At this point, we just need to figure out the supported data
  // formats and sample rates.  We'll proceed by opening the device in
  // the direction with the maximum number of channels, or playback if
  // they are equal.  This might limit our sample rate options, but so
  // be it.

  if ( info.outputChannels >= info.inputChannels )
    stream = SND_PCM_STREAM_PLAYBACK;
  else
    stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
  if ( result < 0 ) {
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
  if ( result < 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Test our discrete set of sample rate values.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
    }
  }
  if ( info.sampleRates.size() == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported data formats ... we don't care about endian-ness just yet
  snd_pcm_format_t format;
  info.nativeFormats = 0;
  format = SND_PCM_FORMAT_S8;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT8;
  format = SND_PCM_FORMAT_S16;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT16;
  format = SND_PCM_FORMAT_S24;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT24;
  format = SND_PCM_FORMAT_S32;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT32;
  format = SND_PCM_FORMAT_FLOAT;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  format = SND_PCM_FORMAT_FLOAT64;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT64;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get the device name
  if (strncmp(name, "default", 7)!=0) {
    char *cardname;
    result = snd_card_get_name( card, &cardname );
    if ( result >= 0 ) {
      sprintf( name, "hw:%s,%d", cardname, subdevice );
      free( cardname );  // snd_card_get_name allocates; caller must free
    }
  }
  info.name = name;

  // That's all ... close the device and return
  snd_pcm_close( phandle );
  info.probed = true;
  return info;
}
7514
7515 void RtApiAlsa :: saveDeviceInfo( void )
7516 {
7517 devices_.clear();
7518
7519 unsigned int nDevices = getDeviceCount();
7520 devices_.resize( nDevices );
7521 for ( unsigned int i=0; i<nDevices; i++ )
7522 devices_[i] = getDeviceInfo( i );
7523 }
7524
7525 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7526 unsigned int firstChannel, unsigned int sampleRate,
7527 RtAudioFormat format, unsigned int *bufferSize,
7528 RtAudio::StreamOptions *options )
7529
7530 {
7531 #if defined(__RTAUDIO_DEBUG__)
7532 struct SndOutputTdealloc {
7533 SndOutputTdealloc() : _out(NULL) { snd_output_stdio_attach(&_out, stderr, 0); }
7534 ~SndOutputTdealloc() { snd_output_close(_out); }
7535 operator snd_output_t*() { return _out; }
7536 snd_output_t *_out;
7537 } out;
7538 #endif
7539
7540 // I'm not using the "plug" interface ... too much inconsistent behavior.
7541
7542 unsigned nDevices = 0;
7543 int result, subdevice, card;
7544 char name[64];
7545 snd_ctl_t *chandle;
7546
7547 if ( device == 0
7548 || (options && options->flags & RTAUDIO_ALSA_USE_DEFAULT) )
7549 {
7550 strcpy(name, "default");
7551 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7552 if ( result == 0 ) {
7553 if ( nDevices == device ) {
7554 strcpy( name, "default" );
7555 snd_ctl_close( chandle );
7556 goto foundDevice;
7557 }
7558 nDevices++;
7559 }
7560 }
7561
7562 else {
7563 nDevices++;
7564 // Count cards and devices
7565 card = -1;
7566 snd_card_next( &card );
7567 while ( card >= 0 ) {
7568 sprintf( name, "hw:%d", card );
7569 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7570 if ( result < 0 ) {
7571 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7572 errorText_ = errorStream_.str();
7573 return FAILURE;
7574 }
7575 subdevice = -1;
7576 while( 1 ) {
7577 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7578 if ( result < 0 ) break;
7579 if ( subdevice < 0 ) break;
7580 if ( nDevices == device ) {
7581 sprintf( name, "hw:%d,%d", card, subdevice );
7582 snd_ctl_close( chandle );
7583 goto foundDevice;
7584 }
7585 nDevices++;
7586 }
7587 snd_ctl_close( chandle );
7588 snd_card_next( &card );
7589 }
7590
7591 if ( nDevices == 0 ) {
7592 // This should not happen because a check is made before this function is called.
7593 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7594 return FAILURE;
7595 }
7596
7597 if ( device >= nDevices ) {
7598 // This should not happen because a check is made before this function is called.
7599 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7600 return FAILURE;
7601 }
7602 }
7603
7604 foundDevice:
7605
7606 // The getDeviceInfo() function will not work for a device that is
7607 // already open. Thus, we'll probe the system before opening a
7608 // stream and save the results for use by getDeviceInfo().
7609 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7610 this->saveDeviceInfo();
7611
7612 snd_pcm_stream_t stream;
7613 if ( mode == OUTPUT )
7614 stream = SND_PCM_STREAM_PLAYBACK;
7615 else
7616 stream = SND_PCM_STREAM_CAPTURE;
7617
7618 snd_pcm_t *phandle;
7619 int openMode = SND_PCM_ASYNC;
7620 result = snd_pcm_open( &phandle, name, stream, openMode );
7621 if ( result < 0 ) {
7622 if ( mode == OUTPUT )
7623 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7624 else
7625 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7626 errorText_ = errorStream_.str();
7627 return FAILURE;
7628 }
7629
7630 // Fill the parameter structure.
7631 snd_pcm_hw_params_t *hw_params;
7632 snd_pcm_hw_params_alloca( &hw_params );
7633 result = snd_pcm_hw_params_any( phandle, hw_params );
7634 if ( result < 0 ) {
7635 snd_pcm_close( phandle );
7636 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7637 errorText_ = errorStream_.str();
7638 return FAILURE;
7639 }
7640
7641 #if defined(__RTAUDIO_DEBUG__)
7642 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7643 snd_pcm_hw_params_dump( hw_params, out );
7644 #endif
7645
7646 // Set access ... check user preference.
7647 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7648 stream_.userInterleaved = false;
7649 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7650 if ( result < 0 ) {
7651 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7652 stream_.deviceInterleaved[mode] = true;
7653 }
7654 else
7655 stream_.deviceInterleaved[mode] = false;
7656 }
7657 else {
7658 stream_.userInterleaved = true;
7659 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7660 if ( result < 0 ) {
7661 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7662 stream_.deviceInterleaved[mode] = false;
7663 }
7664 else
7665 stream_.deviceInterleaved[mode] = true;
7666 }
7667
7668 if ( result < 0 ) {
7669 snd_pcm_close( phandle );
7670 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7671 errorText_ = errorStream_.str();
7672 return FAILURE;
7673 }
7674
7675 // Determine how to set the device format.
7676 stream_.userFormat = format;
7677 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7678
7679 if ( format == RTAUDIO_SINT8 )
7680 deviceFormat = SND_PCM_FORMAT_S8;
7681 else if ( format == RTAUDIO_SINT16 )
7682 deviceFormat = SND_PCM_FORMAT_S16;
7683 else if ( format == RTAUDIO_SINT24 )
7684 deviceFormat = SND_PCM_FORMAT_S24;
7685 else if ( format == RTAUDIO_SINT32 )
7686 deviceFormat = SND_PCM_FORMAT_S32;
7687 else if ( format == RTAUDIO_FLOAT32 )
7688 deviceFormat = SND_PCM_FORMAT_FLOAT;
7689 else if ( format == RTAUDIO_FLOAT64 )
7690 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7691
7692 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7693 stream_.deviceFormat[mode] = format;
7694 goto setFormat;
7695 }
7696
7697 // The user requested format is not natively supported by the device.
7698 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7699 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7700 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7701 goto setFormat;
7702 }
7703
7704 deviceFormat = SND_PCM_FORMAT_FLOAT;
7705 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7706 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7707 goto setFormat;
7708 }
7709
7710 deviceFormat = SND_PCM_FORMAT_S32;
7711 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7712 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7713 goto setFormat;
7714 }
7715
7716 deviceFormat = SND_PCM_FORMAT_S24;
7717 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7718 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7719 goto setFormat;
7720 }
7721
7722 deviceFormat = SND_PCM_FORMAT_S16;
7723 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7724 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7725 goto setFormat;
7726 }
7727
7728 deviceFormat = SND_PCM_FORMAT_S8;
7729 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7730 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7731 goto setFormat;
7732 }
7733
7734 // If we get here, no supported format was found.
7735 snd_pcm_close( phandle );
7736 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7737 errorText_ = errorStream_.str();
7738 return FAILURE;
7739
7740 setFormat:
7741 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7742 if ( result < 0 ) {
7743 snd_pcm_close( phandle );
7744 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7745 errorText_ = errorStream_.str();
7746 return FAILURE;
7747 }
7748
7749 // Determine whether byte-swaping is necessary.
7750 stream_.doByteSwap[mode] = false;
7751 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7752 result = snd_pcm_format_cpu_endian( deviceFormat );
7753 if ( result == 0 )
7754 stream_.doByteSwap[mode] = true;
7755 else if (result < 0) {
7756 snd_pcm_close( phandle );
7757 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7758 errorText_ = errorStream_.str();
7759 return FAILURE;
7760 }
7761 }
7762
7763 // Set the sample rate.
7764 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7765 if ( result < 0 ) {
7766 snd_pcm_close( phandle );
7767 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7768 errorText_ = errorStream_.str();
7769 return FAILURE;
7770 }
7771
7772 // Determine the number of channels for this device. We support a possible
7773 // minimum device channel number > than the value requested by the user.
7774 stream_.nUserChannels[mode] = channels;
7775 unsigned int value;
7776 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7777 unsigned int deviceChannels = value;
7778 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7779 snd_pcm_close( phandle );
7780 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7781 errorText_ = errorStream_.str();
7782 return FAILURE;
7783 }
7784
7785 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7786 if ( result < 0 ) {
7787 snd_pcm_close( phandle );
7788 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7789 errorText_ = errorStream_.str();
7790 return FAILURE;
7791 }
7792 deviceChannels = value;
7793 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7794 stream_.nDeviceChannels[mode] = deviceChannels;
7795
7796 // Set the device channels.
7797 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7798 if ( result < 0 ) {
7799 snd_pcm_close( phandle );
7800 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7801 errorText_ = errorStream_.str();
7802 return FAILURE;
7803 }
7804
7805 // Set the buffer (or period) size.
7806 int dir = 0;
7807 snd_pcm_uframes_t periodSize = *bufferSize;
7808 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7809 if ( result < 0 ) {
7810 snd_pcm_close( phandle );
7811 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7812 errorText_ = errorStream_.str();
7813 return FAILURE;
7814 }
7815 *bufferSize = periodSize;
7816
7817 // Set the buffer number, which in ALSA is referred to as the "period".
7818 unsigned int periods = 0;
7819 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7820 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7821 if ( periods < 2 ) periods = 4; // a fairly safe default value
7822 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7823 if ( result < 0 ) {
7824 snd_pcm_close( phandle );
7825 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7826 errorText_ = errorStream_.str();
7827 return FAILURE;
7828 }
7829
7830 // If attempting to setup a duplex stream, the bufferSize parameter
7831 // MUST be the same in both directions!
7832 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7835 errorText_ = errorStream_.str();
7836 return FAILURE;
7837 }
7838
7839 stream_.bufferSize = *bufferSize;
7840
7841 // Install the hardware configuration
7842 result = snd_pcm_hw_params( phandle, hw_params );
7843 if ( result < 0 ) {
7844 snd_pcm_close( phandle );
7845 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7846 errorText_ = errorStream_.str();
7847 return FAILURE;
7848 }
7849
7850 #if defined(__RTAUDIO_DEBUG__)
7851 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7852 snd_pcm_hw_params_dump( hw_params, out );
7853 #endif
7854
7855 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7856 snd_pcm_sw_params_t *sw_params = NULL;
7857 snd_pcm_sw_params_alloca( &sw_params );
7858 snd_pcm_sw_params_current( phandle, sw_params );
7859 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7860 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7861 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7862
7863 // The following two settings were suggested by Theo Veenker
7864 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7865 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7866
7867 // here are two options for a fix
7868 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7869 snd_pcm_uframes_t val;
7870 snd_pcm_sw_params_get_boundary( sw_params, &val );
7871 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7872
7873 result = snd_pcm_sw_params( phandle, sw_params );
7874 if ( result < 0 ) {
7875 snd_pcm_close( phandle );
7876 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7877 errorText_ = errorStream_.str();
7878 return FAILURE;
7879 }
7880
7881 #if defined(__RTAUDIO_DEBUG__)
7882 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7883 snd_pcm_sw_params_dump( sw_params, out );
7884 #endif
7885
7886 // Set flags for buffer conversion
7887 stream_.doConvertBuffer[mode] = false;
7888 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7889 stream_.doConvertBuffer[mode] = true;
7890 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7891 stream_.doConvertBuffer[mode] = true;
7892 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7893 stream_.nUserChannels[mode] > 1 )
7894 stream_.doConvertBuffer[mode] = true;
7895
7896 // Allocate the ApiHandle if necessary and then save.
7897 AlsaHandle *apiInfo = 0;
7898 if ( stream_.apiHandle == 0 ) {
7899 try {
7900 apiInfo = (AlsaHandle *) new AlsaHandle;
7901 }
7902 catch ( std::bad_alloc& ) {
7903 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7904 goto error;
7905 }
7906
7907 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7908 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7909 goto error;
7910 }
7911
7912 stream_.apiHandle = (void *) apiInfo;
7913 apiInfo->handles[0] = 0;
7914 apiInfo->handles[1] = 0;
7915 }
7916 else {
7917 apiInfo = (AlsaHandle *) stream_.apiHandle;
7918 }
7919 apiInfo->handles[mode] = phandle;
7920 phandle = 0;
7921
7922 // Allocate necessary internal buffers.
7923 unsigned long bufferBytes;
7924 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7925 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7926 if ( stream_.userBuffer[mode] == NULL ) {
7927 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7928 goto error;
7929 }
7930
7931 if ( stream_.doConvertBuffer[mode] ) {
7932
7933 bool makeBuffer = true;
7934 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7935 if ( mode == INPUT ) {
7936 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7937 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7938 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7939 }
7940 }
7941
7942 if ( makeBuffer ) {
7943 bufferBytes *= *bufferSize;
7944 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7945 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7946 if ( stream_.deviceBuffer == NULL ) {
7947 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7948 goto error;
7949 }
7950 }
7951 }
7952
7953 stream_.sampleRate = sampleRate;
7954 stream_.nBuffers = periods;
7955 stream_.device[mode] = device;
7956 stream_.state = STREAM_STOPPED;
7957
7958 // Setup the buffer conversion information structure.
7959 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7960
7961 // Setup thread if necessary.
7962 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7963 // We had already set up an output stream.
7964 stream_.mode = DUPLEX;
7965 // Link the streams if possible.
7966 apiInfo->synchronized = false;
7967 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7968 apiInfo->synchronized = true;
7969 else {
7970 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7971 error( RtAudioError::WARNING );
7972 }
7973 }
7974 else {
7975 stream_.mode = mode;
7976
7977 // Setup callback thread.
7978 stream_.callbackInfo.object = (void *) this;
7979
7980 // Set the thread attributes for joinable and realtime scheduling
7981 // priority (optional). The higher priority will only take affect
7982 // if the program is run as root or suid. Note, under Linux
7983 // processes with CAP_SYS_NICE privilege, a user can change
7984 // scheduling policy and priority (thus need not be root). See
7985 // POSIX "capabilities".
7986 pthread_attr_t attr;
7987 pthread_attr_init( &attr );
7988 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7989 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7990 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7991 stream_.callbackInfo.doRealtime = true;
7992 struct sched_param param;
7993 int priority = options->priority;
7994 int min = sched_get_priority_min( SCHED_RR );
7995 int max = sched_get_priority_max( SCHED_RR );
7996 if ( priority < min ) priority = min;
7997 else if ( priority > max ) priority = max;
7998 param.sched_priority = priority;
7999
8000 // Set the policy BEFORE the priority. Otherwise it fails.
8001 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8002 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8003 // This is definitely required. Otherwise it fails.
8004 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8005 pthread_attr_setschedparam(&attr, &param);
8006 }
8007 else
8008 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8009 #else
8010 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8011 #endif
8012
8013 stream_.callbackInfo.isRunning = true;
8014 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8015 pthread_attr_destroy( &attr );
8016 if ( result ) {
8017 // Failed. Try instead with default attributes.
8018 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8019 if ( result ) {
8020 stream_.callbackInfo.isRunning = false;
8021 errorText_ = "RtApiAlsa::error creating callback thread!";
8022 goto error;
8023 }
8024 }
8025 }
8026
8027 return SUCCESS;
8028
8029 error:
8030 if ( apiInfo ) {
8031 pthread_cond_destroy( &apiInfo->runnable_cv );
8032 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8033 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8034 delete apiInfo;
8035 stream_.apiHandle = 0;
8036 }
8037
8038 if ( phandle) snd_pcm_close( phandle );
8039
8040 for ( int i=0; i<2; i++ ) {
8041 if ( stream_.userBuffer[i] ) {
8042 free( stream_.userBuffer[i] );
8043 stream_.userBuffer[i] = 0;
8044 }
8045 }
8046
8047 if ( stream_.deviceBuffer ) {
8048 free( stream_.deviceBuffer );
8049 stream_.deviceBuffer = 0;
8050 }
8051
8052 stream_.state = STREAM_CLOSED;
8053 return FAILURE;
8054 }
8055
8056 void RtApiAlsa :: closeStream()
8057 {
8058 if ( stream_.state == STREAM_CLOSED ) {
8059 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8060 error( RtAudioError::WARNING );
8061 return;
8062 }
8063
8064 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8065 stream_.callbackInfo.isRunning = false;
8066 MUTEX_LOCK( &stream_.mutex );
8067 if ( stream_.state == STREAM_STOPPED ) {
8068 apiInfo->runnable = true;
8069 pthread_cond_signal( &apiInfo->runnable_cv );
8070 }
8071 MUTEX_UNLOCK( &stream_.mutex );
8072 pthread_join( stream_.callbackInfo.thread, NULL );
8073
8074 if ( stream_.state == STREAM_RUNNING ) {
8075 stream_.state = STREAM_STOPPED;
8076 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8077 snd_pcm_drop( apiInfo->handles[0] );
8078 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8079 snd_pcm_drop( apiInfo->handles[1] );
8080 }
8081
8082 if ( apiInfo ) {
8083 pthread_cond_destroy( &apiInfo->runnable_cv );
8084 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8085 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8086 delete apiInfo;
8087 stream_.apiHandle = 0;
8088 }
8089
8090 for ( int i=0; i<2; i++ ) {
8091 if ( stream_.userBuffer[i] ) {
8092 free( stream_.userBuffer[i] );
8093 stream_.userBuffer[i] = 0;
8094 }
8095 }
8096
8097 if ( stream_.deviceBuffer ) {
8098 free( stream_.deviceBuffer );
8099 stream_.deviceBuffer = 0;
8100 }
8101
8102 stream_.mode = UNINITIALIZED;
8103 stream_.state = STREAM_CLOSED;
8104 }
8105
//! Start a stopped ALSA stream: prepare the pcm device(s) if needed, mark the
//! stream running, and wake the callback thread. Emits SYSTEM_ERROR if a
//! device could not be prepared.
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference so getStreamTime() measures from now.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  // handles[0] = playback pcm, handles[1] = capture pcm (see closeStream()).
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // When the two directions are linked (apiInfo->synchronized), preparing the
  // output above already covers the input side.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Signal the callback thread unconditionally (even on failure) so it does
  // not remain blocked on runnable_cv forever.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
8162
//! Stop a running ALSA stream, letting queued output play out (drain) when
//! possible. Emits SYSTEM_ERROR if a pcm call fails.
void RtApiAlsa :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state before taking the mutex so the callback thread sees the
  // stop as soon as possible.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Linked (synchronized) streams are dropped immediately; otherwise drain
    // so already-queued output frames are played before stopping.
    if ( apiInfo->synchronized )
      result = snd_pcm_drop( handle[0] );
    else
      result = snd_pcm_drain( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // A linked input was already stopped via the output handle above.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
8206
8207 void RtApiAlsa :: abortStream()
8208 {
8209 verifyStream();
8210 if ( stream_.state == STREAM_STOPPED ) {
8211 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8212 error( RtAudioError::WARNING );
8213 return;
8214 }
8215
8216 stream_.state = STREAM_STOPPED;
8217 MUTEX_LOCK( &stream_.mutex );
8218
8219 int result = 0;
8220 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8221 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8222 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8223 result = snd_pcm_drop( handle[0] );
8224 if ( result < 0 ) {
8225 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8226 errorText_ = errorStream_.str();
8227 goto unlock;
8228 }
8229 }
8230
8231 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8232 result = snd_pcm_drop( handle[1] );
8233 if ( result < 0 ) {
8234 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8235 errorText_ = errorStream_.str();
8236 goto unlock;
8237 }
8238 }
8239
8240 unlock:
8241 apiInfo->runnable = false; // fixes high CPU usage when stopped
8242 MUTEX_UNLOCK( &stream_.mutex );
8243
8244 if ( result >= 0 ) return;
8245 error( RtAudioError::SYSTEM_ERROR );
8246 }
8247
//! One period of audio processing, executed repeatedly by the callback
//! thread (alsaCallbackHandler): wait while stopped, invoke the user
//! callback, then read capture data and/or write playback data, handling
//! xrun (over/underrun) recovery along the way.
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream()/closeStream()
    // signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report (and clear) any xrun flags recorded by the previous cycle, then
  // run the user callback. Return value 1 = stop, 2 = abort.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters. When conversion is needed, capture into the device
    // buffer and convert to the user buffer afterwards.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the flat buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag it for the next callback and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Input failed, but still attempt the output half of a duplex cycle.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: one pointer per channel, as in the input path.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it for the next callback and re-prepare the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
          else
            errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream clock; honor a deferred stop request from the callback.
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
8435
8436 static void *alsaCallbackHandler( void *ptr )
8437 {
8438 CallbackInfo *info = (CallbackInfo *) ptr;
8439 RtApiAlsa *object = (RtApiAlsa *) info->object;
8440 bool *isRunning = &info->isRunning;
8441
8442 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8443 if ( info->doRealtime ) {
8444 std::cerr << "RtAudio alsa: " <<
8445 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8446 "running realtime scheduling" << std::endl;
8447 }
8448 #endif
8449
8450 while ( *isRunning == true ) {
8451 pthread_testcancel();
8452 object->callbackEvent();
8453 }
8454
8455 pthread_exit( NULL );
8456 }
8457
8458 //******************** End of __LINUX_ALSA__ *********************//
8459 #endif
8460
8461 #if defined(__LINUX_PULSE__)
8462
8463 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8464 // and Tristan Matthews.
8465
8466 #include <pulse/error.h>
8467 #include <pulse/simple.h>
8468 #include <pulse/pulseaudio.h>
8469 #include <cstdio>
8470
// Mainloop API handle for the device-probe loop; set in collectDeviceInfo()
// and used by rt_pa_mainloop_api_quit() to stop iteration.
static pa_mainloop_api *rt_pa_mainloop_api = NULL;
// One merged device record: a sink and/or source sharing the same
// "device.description" proplist value are folded into a single entry by the
// sink/source info callbacks below. An index of -1 means "no such direction".
struct PaDeviceInfo {
  PaDeviceInfo() : sink_index(-1), source_index(-1) {}
  int sink_index;
  int source_index;
  std::string sink_name;
  std::string source_name;
  RtAudio::DeviceInfo info;
};
// Results of the most recent device scan: merged device list plus the
// server's default sink/source names and sample rate.
static struct {
  std::vector<PaDeviceInfo> dev;
  std::string default_sink_name;
  std::string default_source_name;
  int default_rate;
} rt_pa_info;
8486
// Sample rates advertised for every PulseAudio device; zero-terminated.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Maps an RtAudio sample format to its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Conversion table, terminated by a {0, PA_SAMPLE_INVALID} sentinel.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT24, PA_SAMPLE_S24LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8501
// Per-stream PulseAudio state: one pa_simple connection per direction (NULL
// when that direction is not open), the callback thread, and the condition
// variable/flag used to park that thread while the stream is stopped
// (runnable is guarded by stream_.mutex; see closeStream()/callbackEvent()).
struct PulseAudioHandle {
  pa_simple *s_play;
  pa_simple *s_rec;
  pthread_t thread;
  pthread_cond_t runnable_cv;
  bool runnable;
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8510
8511 static void rt_pa_mainloop_api_quit(int ret) {
8512 rt_pa_mainloop_api->quit(rt_pa_mainloop_api, ret);
8513 }
8514
8515 static void rt_pa_server_callback(pa_context *context, const pa_server_info *info, void *data){
8516 (void)context;
8517 (void)data;
8518 pa_sample_spec ss;
8519
8520 if (!info)
8521 rt_pa_mainloop_api_quit(1);
8522
8523 ss = info->sample_spec;
8524
8525 rt_pa_info.default_rate = ss.rate;
8526 rt_pa_info.default_sink_name = info->default_sink_name;
8527 rt_pa_info.default_source_name = info->default_source_name;
8528 rt_pa_mainloop_api_quit(0);
8529 }
8530
// pa_context_get_sink_info_list() callback: build a PaDeviceInfo from one
// sink and merge it into rt_pa_info.dev, matching an existing source entry by
// device description when possible. eol != 0 marks end-of-list.
static void rt_pa_sink_info_cb(pa_context * /*c*/, const pa_sink_info *i,
                               int eol, void * /*userdata*/)
{
  if (eol) return;
  PaDeviceInfo inf;
  inf.info.name = pa_proplist_gets(i->proplist, "device.description");
  inf.info.probed = true;
  inf.info.outputChannels = i->sample_spec.channels;
  inf.info.preferredSampleRate = i->sample_spec.rate;
  inf.info.isDefaultOutput = (rt_pa_info.default_sink_name == i->name);
  inf.sink_index = i->index;
  inf.sink_name = i->name;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
    inf.info.sampleRates.push_back( *sr );
  for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
        fm->rtaudio_format; ++fm )
    inf.info.nativeFormats |= fm->rtaudio_format;
  // NOTE(review): this loop index 'i' shadows the pa_sink_info parameter 'i';
  // only inf is used inside, so behavior is correct, but renaming would help.
  for (size_t i=0; i < rt_pa_info.dev.size(); i++)
  {
    /* Attempt to match up sink and source records by device description. */
    if (rt_pa_info.dev[i].info.name == inf.info.name) {
      rt_pa_info.dev[i].sink_index = inf.sink_index;
      rt_pa_info.dev[i].sink_name = inf.sink_name;
      rt_pa_info.dev[i].info.outputChannels = inf.info.outputChannels;
      rt_pa_info.dev[i].info.isDefaultOutput = inf.info.isDefaultOutput;
      /* Assume duplex channels are minimum of input and output channels. */
      /* Uncomment if we add support for DUPLEX
      if (rt_pa_info.dev[i].source_index > -1)
        (inf.info.outputChannels < rt_pa_info.dev[i].info.inputChannels)
        ? inf.info.outputChannels : rt_pa_info.dev[i].info.inputChannels;
      */
      return;
    }
  }
  /* try to ensure device #0 is the default */
  if (inf.info.isDefaultOutput)
    rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
  else
    rt_pa_info.dev.push_back(inf);
}
8571
// pa_context_get_source_info_list() callback: mirror of rt_pa_sink_info_cb
// for capture sources -- merge each source into rt_pa_info.dev by device
// description. eol != 0 marks end-of-list.
static void rt_pa_source_info_cb(pa_context * /*c*/, const pa_source_info *i,
                                 int eol, void * /*userdata*/)
{
  if (eol) return;
  PaDeviceInfo inf;
  inf.info.name = pa_proplist_gets(i->proplist, "device.description");
  inf.info.probed = true;
  inf.info.inputChannels = i->sample_spec.channels;
  inf.info.preferredSampleRate = i->sample_spec.rate;
  inf.info.isDefaultInput = (rt_pa_info.default_source_name == i->name);
  inf.source_index = i->index;
  inf.source_name = i->name;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
    inf.info.sampleRates.push_back( *sr );
  for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
        fm->rtaudio_format; ++fm )
    inf.info.nativeFormats |= fm->rtaudio_format;

  // NOTE(review): loop index 'i' shadows the pa_source_info parameter 'i';
  // correct as written, but a distinct name would be clearer.
  for (size_t i=0; i < rt_pa_info.dev.size(); i++)
  {
    /* Attempt to match up sink and source records by device description. */
    if (rt_pa_info.dev[i].info.name == inf.info.name) {
      rt_pa_info.dev[i].source_index = inf.source_index;
      rt_pa_info.dev[i].source_name = inf.source_name;
      rt_pa_info.dev[i].info.inputChannels = inf.info.inputChannels;
      rt_pa_info.dev[i].info.isDefaultInput = inf.info.isDefaultInput;
      /* Assume duplex channels are minimum of input and output channels. */
      /* Uncomment if we add support for DUPLEX
      if (rt_pa_info.dev[i].sink_index > -1) {
        rt_pa_info.dev[i].info.duplexChannels =
          (inf.info.inputChannels < rt_pa_info.dev[i].info.outputChannels)
          ? inf.info.inputChannels : rt_pa_info.dev[i].info.outputChannels;
      }
      */
      return;
    }
  }
  /* try to ensure device #0 is the default */
  if (inf.info.isDefaultInput)
    rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
  else
    rt_pa_info.dev.push_back(inf);
}
8615
8616 static void rt_pa_context_state_callback(pa_context *context, void *userdata) {
8617 (void)userdata;
8618
8619 switch (pa_context_get_state(context)) {
8620 case PA_CONTEXT_CONNECTING:
8621 case PA_CONTEXT_AUTHORIZING:
8622 case PA_CONTEXT_SETTING_NAME:
8623 break;
8624
8625 case PA_CONTEXT_READY:
8626 rt_pa_info.dev.clear();
8627 pa_context_get_server_info(context, rt_pa_server_callback, NULL);
8628 pa_context_get_sink_info_list(context, rt_pa_sink_info_cb, NULL);
8629 pa_context_get_source_info_list(context, rt_pa_source_info_cb, NULL);
8630 break;
8631
8632 case PA_CONTEXT_TERMINATED:
8633 rt_pa_mainloop_api_quit(0);
8634 break;
8635
8636 case PA_CONTEXT_FAILED:
8637 default:
8638 rt_pa_mainloop_api_quit(1);
8639 }
8640 }
8641
8642 RtApiPulse::~RtApiPulse()
8643 {
8644 if ( stream_.state != STREAM_CLOSED )
8645 closeStream();
8646 }
8647
// Run a short-lived PulseAudio mainloop to (re)populate rt_pa_info with the
// server defaults and the merged sink/source device list. All failures are
// reported as warnings; the goto-style cleanup releases whatever was created.
void RtApiPulse::collectDeviceInfo( void )
{
  pa_context *context = NULL;
  pa_mainloop *m = NULL;
  char *server = NULL;  // NULL = connect to the default server
  int ret = 1;

  if (!(m = pa_mainloop_new())) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_mainloop_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Publish the mainloop API so the state/info callbacks can quit the loop.
  rt_pa_mainloop_api = pa_mainloop_get_api(m);

  if (!(context = pa_context_new_with_proplist(rt_pa_mainloop_api, NULL, NULL))) {
    errorStream_ << "pa_context_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // rt_pa_context_state_callback drives the actual queries once connected.
  pa_context_set_state_callback(context, rt_pa_context_state_callback, NULL);

  if (pa_context_connect(context, server, PA_CONTEXT_NOFLAGS, NULL) < 0) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_context_connect() failed: "
      << pa_strerror(pa_context_errno(context));
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Blocks until a callback calls rt_pa_mainloop_api_quit().
  if (pa_mainloop_run(m, &ret) < 0) {
    errorStream_ << "pa_mainloop_run() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

 quit:
  if (context)
    pa_context_unref(context);

  if (m) {
    pa_mainloop_free(m);
  }

  // 'server' is never assigned above, so this is currently a no-op free.
  pa_xfree(server);
}
8698
8699 unsigned int RtApiPulse::getDeviceCount( void )
8700 {
8701 collectDeviceInfo();
8702 return rt_pa_info.dev.size();
8703 }
8704
8705 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
8706 {
8707 if (rt_pa_info.dev.size()==0)
8708 collectDeviceInfo();
8709 if (device < rt_pa_info.dev.size())
8710 return rt_pa_info.dev[device].info;
8711 return RtAudio::DeviceInfo();
8712 }
8713
// Thread entry point for the PulseAudio callback thread: repeatedly
// services audio I/O via RtApiPulse::callbackEvent() until the stream
// owner (closeStream) clears CallbackInfo::isRunning.
static void *pulseaudio_callback( void * user )
{
  CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
  RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
  // volatile: isRunning is written from another thread (closeStream).
  volatile bool *isRunning = &cbi->isRunning;

#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
  // Report whether the realtime-scheduling request actually took effect.
  if (cbi->doRealtime) {
    std::cerr << "RtAudio pulse: " <<
      (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
      "running realtime scheduling" << std::endl;
  }
#endif

  while ( *isRunning ) {
    // Honor a pending pthread_cancel() between buffer cycles.
    pthread_testcancel();
    context->callbackEvent();
  }

  pthread_exit( NULL );
}
8735
// Tear down the PulseAudio stream: stop and join the callback thread,
// release the pa_simple connections, and free the per-stream handle and
// user buffers.
void RtApiPulse::closeStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // Tell the callback thread's run loop to exit.
  stream_.callbackInfo.isRunning = false;
  if ( pah ) {
    MUTEX_LOCK( &stream_.mutex );
    // If the stream is stopped, the callback thread is parked on
    // runnable_cv; wake it so it can observe isRunning == false and exit.
    if ( stream_.state == STREAM_STOPPED ) {
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    if ( pah->s_play ) {
      // Discard any queued output before closing the playback connection.
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  // Release the user-side buffers (one per direction).
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }
  // NOTE(review): stream_.deviceBuffer is not freed here — verify it is
  // released elsewhere (e.g. on the probeDeviceOpen error path or by the
  // destructor).

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
8774
// One cycle of the callback thread: invoke the user callback, then push
// the resulting audio to (and/or pull audio from) the PulseAudio server
// using the blocking simple-API read/write calls.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // While stopped, park on the condition variable until startStream() (or
  // closeStream()) marks the handle runnable.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  // NOTE(review): status is always 0 — over/underflow conditions are not
  // reported to the user callback by this backend.
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // When conversion is required, the server-facing data lives in
  // deviceBuffer; otherwise the user buffers are used directly.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Convert user data to the device format if needed, then write one
    // full buffer (pa_simple_write blocks until accepted by the server).
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    // Read one full buffer from the server (blocking), then convert to
    // the user format if needed.
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // Callback return value 1 requests a graceful stop after this cycle.
  if ( doStopStream == 1 )
    stopStream();
}
8866
8867 void RtApiPulse::startStream( void )
8868 {
8869 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8870
8871 if ( stream_.state == STREAM_CLOSED ) {
8872 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8873 error( RtAudioError::INVALID_USE );
8874 return;
8875 }
8876 if ( stream_.state == STREAM_RUNNING ) {
8877 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8878 error( RtAudioError::WARNING );
8879 return;
8880 }
8881
8882 MUTEX_LOCK( &stream_.mutex );
8883
8884 #if defined( HAVE_GETTIMEOFDAY )
8885 gettimeofday( &stream_.lastTickTimestamp, NULL );
8886 #endif
8887
8888 stream_.state = STREAM_RUNNING;
8889
8890 pah->runnable = true;
8891 pthread_cond_signal( &pah->runnable_cv );
8892 MUTEX_UNLOCK( &stream_.mutex );
8893 }
8894
// Stop a running stream, draining any audio still queued on the playback
// connection so it is heard before the stream halts.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): the state is flipped to STOPPED here, before the lock,
  // and again below under the lock. The early write lets the callback
  // thread notice the stop sooner, but it races with callbackEvent's
  // unsynchronized state checks — confirm intended.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Block until all queued playback data has been played out.
      if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8931
// Stop a running stream immediately, discarding (flushing) any audio still
// queued on the playback connection. Mirrors stopStream(), which drains
// instead of flushing.
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): as in stopStream(), the state is set to STOPPED both
  // before and after taking the lock — confirm the early unsynchronized
  // write is intended.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Drop queued playback data instead of waiting for it to play.
      if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8968
// Open a PulseAudio simple-API connection for the requested device and
// direction, configure the stream_ structure (formats, buffers, conversion
// flags), and spawn the callback thread on first open.
// Returns SUCCESS on success, FAILURE (after cleanup via the error label)
// otherwise.
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
{
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  if ( device >= rt_pa_info.dev.size() ) return false;
  if ( firstChannel != 0 ) {
    errorText_ = "PulseAudio does not support channel offset mapping.";
    return false;
  }

  /* these may be NULL for default, but we've already got the names */
  const char *dev_input = NULL;
  const char *dev_output = NULL;
  if (!rt_pa_info.dev[device].source_name.empty())
    dev_input = rt_pa_info.dev[device].source_name.c_str();
  if (!rt_pa_info.dev[device].sink_name.empty())
    dev_output = rt_pa_info.dev[device].sink_name.c_str();

  // Validate direction support and channel counts against the cached
  // device capabilities.
  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels == 0) {
    errorText_ = "PulseAudio device does not support input.";
    return false;
  }
  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels == 0) {
    errorText_ = "PulseAudio device does not support output.";
    return false;
  }
  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels == 0) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio device does not support duplex.";
    return false;
  }

  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of input channels.";
    return false;
  }

  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of output channels.";
    return false;
  }

  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels < channels) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio: unsupported number of duplex channels.";
    return false;
  }

  ss.channels = channels;

  // The requested rate must be one of the rates RtAudio advertises.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      sr_found = true;
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
      break;
    }
  }
  if ( !sr_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
    return false;
  }

  // Map the RtAudio sample format onto a native PulseAudio format; if no
  // direct mapping exists, fall back to float32 and convert in software.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      sf_found = true;
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
      break;
    }
  }
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  }

  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = options ? options->numberOfBuffers : 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse an existing output-side device buffer if it is already large
      // enough for the input side.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Allocate the per-stream PulseAudio handle on first open.
  if ( !stream_.apiHandle ) {
    PulseAudioHandle *pah = new PulseAudioHandle;
    if ( !pah ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
      goto error;
    }

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
      // NOTE(review): on this path the error label below only frees the
      // handle when callbackInfo.isRunning is true, so the newly allocated
      // pah appears to leak (and stream_.apiHandle keeps pointing at it) —
      // verify.
      goto error;
    }
  }
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  int error;  // pa_simple_new() error code (distinct from the error: label below)
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
  switch ( mode ) {
  // buffer_attr is declared at switch scope so both the INPUT and OUTPUT
  // cases can use it; it has no initializer, so jumping past the
  // declaration via the case labels is legal.
  pa_buffer_attr buffer_attr;
  case INPUT:
    // NOTE(review): fragsize uses whatever bufferBytes last held above
    // (user-side or device-side byte count depending on the conversion
    // path) — confirm this is the intended fragment size.
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;

    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD,
                                dev_input, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
      goto error;
    }
    break;
  case OUTPUT: {
    pa_buffer_attr * attr_ptr;

    if ( options && options->numberOfBuffers > 0 ) {
      // pa_buffer_attr::fragsize is recording-only.
      // Hopefully PulseAudio won't access uninitialized fields.
      buffer_attr.maxlength = bufferBytes * options->numberOfBuffers;
      buffer_attr.minreq = -1;
      buffer_attr.prebuf = -1;
      buffer_attr.tlength = -1;
      attr_ptr = &buffer_attr;
    } else {
      attr_ptr = nullptr;
    }

    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK,
                                 dev_output, "Playback", &ss, NULL, attr_ptr, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
      goto error;
    }
    break;
  }
  case DUPLEX:
    /* Note: We could add DUPLEX by synchronizing multiple streams,
       but it would mean moving from Simple API to Asynchronous API:
       https://freedesktop.org/software/pulseaudio/doxygen/streams.html#sync_streams */
    errorText_ = "RtApiPulse::probeDeviceOpen: duplex not supported for PulseAudio.";
    goto error;
  default:
    goto error;
  }

  // Track the overall stream mode; opening the second direction after the
  // first upgrades the stream to DUPLEX.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    goto error;  // same direction opened twice
  else
    stream_.mode = DUPLEX;

  // Spawn the callback thread on the first successful open.
  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;

    stream_.state = STREAM_STOPPED;
    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional). The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;

      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      pthread_attr_setschedparam(&attr, &param);
    }
    else
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#else
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#endif

    stream_.callbackInfo.isRunning = true;
    int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
    pthread_attr_destroy(&attr);
    if(result != 0) {
      // Failed. Try instead with default attributes.
      result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
      if(result != 0) {
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
        goto error;
      }
    }
  }

  return SUCCESS;

 error:
  // Cleanup path: release the handle (when owned by a running stream) and
  // any buffers allocated above.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
9256
9257 //******************** End of __LINUX_PULSE__ *********************//
9258 #endif
9259
9260 #if defined(__LINUX_OSS__)
9261
9262 #include <unistd.h>
9263 #include <sys/ioctl.h>
9264 #include <unistd.h>
9265 #include <fcntl.h>
9266 #include <sys/soundcard.h>
9267 #include <errno.h>
9268 #include <math.h>
9269
9270 static void *ossCallbackHandler(void * ptr);
9271
9272 // A structure to hold various information related to the OSS API
9273 // implementation.
struct OssHandle {
  int id[2];                // device file descriptors (playback, capture)
  bool xrun[2];             // over/underrun flags, one per direction
  bool triggered;           // true once the device trigger has been issued
  pthread_cond_t runnable;  // signaled to (re)start the callback thread

  OssHandle()
    : triggered( false )
  {
    id[0] = id[1] = 0;
    xrun[0] = xrun[1] = false;
  }
};
9283
// Default constructor: all stream state lives in the base-class stream_
// structure, so there is nothing OSS-specific to initialize.
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
9288
// Destructor: make sure any open stream (device fds, buffers, thread) is
// released before the object goes away.
RtApiOss :: ~RtApiOss()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
9293
9294 unsigned int RtApiOss :: getDeviceCount( void )
9295 {
9296 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9297 if ( mixerfd == -1 ) {
9298 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9299 error( RtAudioError::WARNING );
9300 return 0;
9301 }
9302
9303 oss_sysinfo sysinfo;
9304 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9305 close( mixerfd );
9306 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9307 error( RtAudioError::WARNING );
9308 return 0;
9309 }
9310
9311 close( mixerfd );
9312 return sysinfo.numaudios;
9313 }
9314
9315 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9316 {
9317 RtAudio::DeviceInfo info;
9318 info.probed = false;
9319
9320 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9321 if ( mixerfd == -1 ) {
9322 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9323 error( RtAudioError::WARNING );
9324 return info;
9325 }
9326
9327 oss_sysinfo sysinfo;
9328 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9329 if ( result == -1 ) {
9330 close( mixerfd );
9331 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9332 error( RtAudioError::WARNING );
9333 return info;
9334 }
9335
9336 unsigned nDevices = sysinfo.numaudios;
9337 if ( nDevices == 0 ) {
9338 close( mixerfd );
9339 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9340 error( RtAudioError::INVALID_USE );
9341 return info;
9342 }
9343
9344 if ( device >= nDevices ) {
9345 close( mixerfd );
9346 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9347 error( RtAudioError::INVALID_USE );
9348 return info;
9349 }
9350
9351 oss_audioinfo ainfo;
9352 ainfo.dev = device;
9353 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9354 close( mixerfd );
9355 if ( result == -1 ) {
9356 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9357 errorText_ = errorStream_.str();
9358 error( RtAudioError::WARNING );
9359 return info;
9360 }
9361
9362 // Probe channels
9363 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9364 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9365 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9366 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9367 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9368 }
9369
9370 // Probe data formats ... do for input
9371 unsigned long mask = ainfo.iformats;
9372 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9373 info.nativeFormats |= RTAUDIO_SINT16;
9374 if ( mask & AFMT_S8 )
9375 info.nativeFormats |= RTAUDIO_SINT8;
9376 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9377 info.nativeFormats |= RTAUDIO_SINT32;
9378 #ifdef AFMT_FLOAT
9379 if ( mask & AFMT_FLOAT )
9380 info.nativeFormats |= RTAUDIO_FLOAT32;
9381 #endif
9382 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9383 info.nativeFormats |= RTAUDIO_SINT24;
9384
9385 // Check that we have at least one supported format
9386 if ( info.nativeFormats == 0 ) {
9387 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9388 errorText_ = errorStream_.str();
9389 error( RtAudioError::WARNING );
9390 return info;
9391 }
9392
9393 // Probe the supported sample rates.
9394 info.sampleRates.clear();
9395 if ( ainfo.nrates ) {
9396 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9397 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9398 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9399 info.sampleRates.push_back( SAMPLE_RATES[k] );
9400
9401 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9402 info.preferredSampleRate = SAMPLE_RATES[k];
9403
9404 break;
9405 }
9406 }
9407 }
9408 }
9409 else {
9410 // Check min and max rate values;
9411 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9412 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9413 info.sampleRates.push_back( SAMPLE_RATES[k] );
9414
9415 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9416 info.preferredSampleRate = SAMPLE_RATES[k];
9417 }
9418 }
9419 }
9420
9421 if ( info.sampleRates.size() == 0 ) {
9422 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9423 errorText_ = errorStream_.str();
9424 error( RtAudioError::WARNING );
9425 }
9426 else {
9427 info.probed = true;
9428 info.name = ainfo.name;
9429 }
9430
9431 return info;
9432 }
9433
9434
9435 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9436 unsigned int firstChannel, unsigned int sampleRate,
9437 RtAudioFormat format, unsigned int *bufferSize,
9438 RtAudio::StreamOptions *options )
9439 {
9440 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9441 if ( mixerfd == -1 ) {
9442 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9443 return FAILURE;
9444 }
9445
9446 oss_sysinfo sysinfo;
9447 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9448 if ( result == -1 ) {
9449 close( mixerfd );
9450 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9451 return FAILURE;
9452 }
9453
9454 unsigned nDevices = sysinfo.numaudios;
9455 if ( nDevices == 0 ) {
9456 // This should not happen because a check is made before this function is called.
9457 close( mixerfd );
9458 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9459 return FAILURE;
9460 }
9461
9462 if ( device >= nDevices ) {
9463 // This should not happen because a check is made before this function is called.
9464 close( mixerfd );
9465 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9466 return FAILURE;
9467 }
9468
9469 oss_audioinfo ainfo;
9470 ainfo.dev = device;
9471 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9472 close( mixerfd );
9473 if ( result == -1 ) {
9474 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9475 errorText_ = errorStream_.str();
9476 return FAILURE;
9477 }
9478
9479 // Check if device supports input or output
9480 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9481 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9482 if ( mode == OUTPUT )
9483 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9484 else
9485 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9486 errorText_ = errorStream_.str();
9487 return FAILURE;
9488 }
9489
9490 int flags = 0;
9491 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9492 if ( mode == OUTPUT )
9493 flags |= O_WRONLY;
9494 else { // mode == INPUT
9495 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9496 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9497 close( handle->id[0] );
9498 handle->id[0] = 0;
9499 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9500 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9501 errorText_ = errorStream_.str();
9502 return FAILURE;
9503 }
9504 // Check that the number previously set channels is the same.
9505 if ( stream_.nUserChannels[0] != channels ) {
9506 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9507 errorText_ = errorStream_.str();
9508 return FAILURE;
9509 }
9510 flags |= O_RDWR;
9511 }
9512 else
9513 flags |= O_RDONLY;
9514 }
9515
9516 // Set exclusive access if specified.
9517 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9518
9519 // Try to open the device.
9520 int fd;
9521 fd = open( ainfo.devnode, flags, 0 );
9522 if ( fd == -1 ) {
9523 if ( errno == EBUSY )
9524 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9525 else
9526 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9527 errorText_ = errorStream_.str();
9528 return FAILURE;
9529 }
9530
9531 // For duplex operation, specifically set this mode (this doesn't seem to work).
9532 /*
9533 if ( flags | O_RDWR ) {
9534 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9535 if ( result == -1) {
9536 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9537 errorText_ = errorStream_.str();
9538 return FAILURE;
9539 }
9540 }
9541 */
9542
9543 // Check the device channel support.
9544 stream_.nUserChannels[mode] = channels;
9545 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9546 close( fd );
9547 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9548 errorText_ = errorStream_.str();
9549 return FAILURE;
9550 }
9551
9552 // Set the number of channels.
9553 int deviceChannels = channels + firstChannel;
9554 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9555 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9556 close( fd );
9557 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9558 errorText_ = errorStream_.str();
9559 return FAILURE;
9560 }
9561 stream_.nDeviceChannels[mode] = deviceChannels;
9562
9563 // Get the data format mask
9564 int mask;
9565 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9566 if ( result == -1 ) {
9567 close( fd );
9568 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9569 errorText_ = errorStream_.str();
9570 return FAILURE;
9571 }
9572
9573 // Determine how to set the device format.
9574 stream_.userFormat = format;
9575 int deviceFormat = -1;
9576 stream_.doByteSwap[mode] = false;
9577 if ( format == RTAUDIO_SINT8 ) {
9578 if ( mask & AFMT_S8 ) {
9579 deviceFormat = AFMT_S8;
9580 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9581 }
9582 }
9583 else if ( format == RTAUDIO_SINT16 ) {
9584 if ( mask & AFMT_S16_NE ) {
9585 deviceFormat = AFMT_S16_NE;
9586 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9587 }
9588 else if ( mask & AFMT_S16_OE ) {
9589 deviceFormat = AFMT_S16_OE;
9590 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9591 stream_.doByteSwap[mode] = true;
9592 }
9593 }
9594 else if ( format == RTAUDIO_SINT24 ) {
9595 if ( mask & AFMT_S24_NE ) {
9596 deviceFormat = AFMT_S24_NE;
9597 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9598 }
9599 else if ( mask & AFMT_S24_OE ) {
9600 deviceFormat = AFMT_S24_OE;
9601 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9602 stream_.doByteSwap[mode] = true;
9603 }
9604 }
9605 else if ( format == RTAUDIO_SINT32 ) {
9606 if ( mask & AFMT_S32_NE ) {
9607 deviceFormat = AFMT_S32_NE;
9608 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9609 }
9610 else if ( mask & AFMT_S32_OE ) {
9611 deviceFormat = AFMT_S32_OE;
9612 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9613 stream_.doByteSwap[mode] = true;
9614 }
9615 }
9616
9617 if ( deviceFormat == -1 ) {
9618 // The user requested format is not natively supported by the device.
9619 if ( mask & AFMT_S16_NE ) {
9620 deviceFormat = AFMT_S16_NE;
9621 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9622 }
9623 else if ( mask & AFMT_S32_NE ) {
9624 deviceFormat = AFMT_S32_NE;
9625 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9626 }
9627 else if ( mask & AFMT_S24_NE ) {
9628 deviceFormat = AFMT_S24_NE;
9629 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9630 }
9631 else if ( mask & AFMT_S16_OE ) {
9632 deviceFormat = AFMT_S16_OE;
9633 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9634 stream_.doByteSwap[mode] = true;
9635 }
9636 else if ( mask & AFMT_S32_OE ) {
9637 deviceFormat = AFMT_S32_OE;
9638 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9639 stream_.doByteSwap[mode] = true;
9640 }
9641 else if ( mask & AFMT_S24_OE ) {
9642 deviceFormat = AFMT_S24_OE;
9643 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9644 stream_.doByteSwap[mode] = true;
9645 }
9646 else if ( mask & AFMT_S8) {
9647 deviceFormat = AFMT_S8;
9648 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9649 }
9650 }
9651
9652 if ( stream_.deviceFormat[mode] == 0 ) {
9653 // This really shouldn't happen ...
9654 close( fd );
9655 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9656 errorText_ = errorStream_.str();
9657 return FAILURE;
9658 }
9659
9660 // Set the data format.
9661 int temp = deviceFormat;
9662 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9663 if ( result == -1 || deviceFormat != temp ) {
9664 close( fd );
9665 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9666 errorText_ = errorStream_.str();
9667 return FAILURE;
9668 }
9669
9670 // Attempt to set the buffer size. According to OSS, the minimum
9671 // number of buffers is two. The supposed minimum buffer size is 16
9672 // bytes, so that will be our lower bound. The argument to this
9673 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9674 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9675 // We'll check the actual value used near the end of the setup
9676 // procedure.
9677 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9678 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9679 int buffers = 0;
9680 if ( options ) buffers = options->numberOfBuffers;
9681 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9682 if ( buffers < 2 ) buffers = 3;
9683 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9684 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9685 if ( result == -1 ) {
9686 close( fd );
9687 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9688 errorText_ = errorStream_.str();
9689 return FAILURE;
9690 }
9691 stream_.nBuffers = buffers;
9692
9693 // Save buffer size (in sample frames).
9694 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9695 stream_.bufferSize = *bufferSize;
9696
9697 // Set the sample rate.
9698 int srate = sampleRate;
9699 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9700 if ( result == -1 ) {
9701 close( fd );
9702 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9703 errorText_ = errorStream_.str();
9704 return FAILURE;
9705 }
9706
9707 // Verify the sample rate setup worked.
9708 if ( abs( srate - (int)sampleRate ) > 100 ) {
9709 close( fd );
9710 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9711 errorText_ = errorStream_.str();
9712 return FAILURE;
9713 }
9714 stream_.sampleRate = sampleRate;
9715
9716 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9717 // We're doing duplex setup here.
9718 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9719 stream_.nDeviceChannels[0] = deviceChannels;
9720 }
9721
9722 // Set interleaving parameters.
9723 stream_.userInterleaved = true;
9724 stream_.deviceInterleaved[mode] = true;
9725 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9726 stream_.userInterleaved = false;
9727
9728 // Set flags for buffer conversion
9729 stream_.doConvertBuffer[mode] = false;
9730 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9731 stream_.doConvertBuffer[mode] = true;
9732 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9733 stream_.doConvertBuffer[mode] = true;
9734 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9735 stream_.nUserChannels[mode] > 1 )
9736 stream_.doConvertBuffer[mode] = true;
9737
9738 // Allocate the stream handles if necessary and then save.
9739 if ( stream_.apiHandle == 0 ) {
9740 try {
9741 handle = new OssHandle;
9742 }
9743 catch ( std::bad_alloc& ) {
9744 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9745 goto error;
9746 }
9747
9748 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9749 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9750 goto error;
9751 }
9752
9753 stream_.apiHandle = (void *) handle;
9754 }
9755 else {
9756 handle = (OssHandle *) stream_.apiHandle;
9757 }
9758 handle->id[mode] = fd;
9759
9760 // Allocate necessary internal buffers.
9761 unsigned long bufferBytes;
9762 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9763 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9764 if ( stream_.userBuffer[mode] == NULL ) {
9765 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9766 goto error;
9767 }
9768
9769 if ( stream_.doConvertBuffer[mode] ) {
9770
9771 bool makeBuffer = true;
9772 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9773 if ( mode == INPUT ) {
9774 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9775 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9776 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9777 }
9778 }
9779
9780 if ( makeBuffer ) {
9781 bufferBytes *= *bufferSize;
9782 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9783 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9784 if ( stream_.deviceBuffer == NULL ) {
9785 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9786 goto error;
9787 }
9788 }
9789 }
9790
9791 stream_.device[mode] = device;
9792 stream_.state = STREAM_STOPPED;
9793
9794 // Setup the buffer conversion information structure.
9795 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9796
9797 // Setup thread if necessary.
9798 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9799 // We had already set up an output stream.
9800 stream_.mode = DUPLEX;
9801 if ( stream_.device[0] == device ) handle->id[0] = fd;
9802 }
9803 else {
9804 stream_.mode = mode;
9805
9806 // Setup callback thread.
9807 stream_.callbackInfo.object = (void *) this;
9808
9809 // Set the thread attributes for joinable and realtime scheduling
9810 // priority. The higher priority will only take affect if the
9811 // program is run as root or suid.
9812 pthread_attr_t attr;
9813 pthread_attr_init( &attr );
9814 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9815 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9816 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9817 stream_.callbackInfo.doRealtime = true;
9818 struct sched_param param;
9819 int priority = options->priority;
9820 int min = sched_get_priority_min( SCHED_RR );
9821 int max = sched_get_priority_max( SCHED_RR );
9822 if ( priority < min ) priority = min;
9823 else if ( priority > max ) priority = max;
9824 param.sched_priority = priority;
9825
9826 // Set the policy BEFORE the priority. Otherwise it fails.
9827 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9828 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9829 // This is definitely required. Otherwise it fails.
9830 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9831 pthread_attr_setschedparam(&attr, &param);
9832 }
9833 else
9834 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9835 #else
9836 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9837 #endif
9838
9839 stream_.callbackInfo.isRunning = true;
9840 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9841 pthread_attr_destroy( &attr );
9842 if ( result ) {
9843 // Failed. Try instead with default attributes.
9844 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9845 if ( result ) {
9846 stream_.callbackInfo.isRunning = false;
9847 errorText_ = "RtApiOss::error creating callback thread!";
9848 goto error;
9849 }
9850 }
9851 }
9852
9853 return SUCCESS;
9854
9855 error:
9856 if ( handle ) {
9857 pthread_cond_destroy( &handle->runnable );
9858 if ( handle->id[0] ) close( handle->id[0] );
9859 if ( handle->id[1] ) close( handle->id[1] );
9860 delete handle;
9861 stream_.apiHandle = 0;
9862 }
9863
9864 for ( int i=0; i<2; i++ ) {
9865 if ( stream_.userBuffer[i] ) {
9866 free( stream_.userBuffer[i] );
9867 stream_.userBuffer[i] = 0;
9868 }
9869 }
9870
9871 if ( stream_.deviceBuffer ) {
9872 free( stream_.deviceBuffer );
9873 stream_.deviceBuffer = 0;
9874 }
9875
9876 stream_.state = STREAM_CLOSED;
9877 return FAILURE;
9878 }
9879
void RtApiOss :: closeStream()
{
  // Tear down an open stream: stop the callback thread, halt any device
  // I/O still running, then release the OSS handle and all buffers.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Clear the run flag so ossCallbackHandler() exits its loop, then wake a
  // callback thread that may be blocked on the condition variable.
  // NOTE(review): handle is dereferenced here but only null-checked further
  // below — presumably apiHandle is always set once the stream is open;
  // confirm against probeDeviceOpen().
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // If the stream was still running, halt the device before closing it.
  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  // Release the OSS handle: condition variable, device file descriptors.
  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free the per-direction user buffers (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the shared device-format conversion buffer, if allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9927
void RtApiOss :: startStream()
{
  // Mark the stream RUNNING and wake the callback thread, which waits on
  // the handle's condition variable while the stream is stopped.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point so elapsed time is measured
  // from this start.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Wake the callback thread blocked in callbackEvent().  NOTE(review): the
  // waiter checks stream_.state before acquiring the mutex, so there looks
  // to be a narrow missed-wakeup window if this signal fires between that
  // check and the wait — confirm against upstream RtAudio.
  pthread_cond_signal( &handle->runnable );
}
9953
9954 void RtApiOss :: stopStream()
9955 {
9956 verifyStream();
9957 if ( stream_.state == STREAM_STOPPED ) {
9958 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9959 error( RtAudioError::WARNING );
9960 return;
9961 }
9962
9963 MUTEX_LOCK( &stream_.mutex );
9964
9965 // The state might change while waiting on a mutex.
9966 if ( stream_.state == STREAM_STOPPED ) {
9967 MUTEX_UNLOCK( &stream_.mutex );
9968 return;
9969 }
9970
9971 int result = 0;
9972 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9973 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9974
9975 // Flush the output with zeros a few times.
9976 char *buffer;
9977 int samples;
9978 RtAudioFormat format;
9979
9980 if ( stream_.doConvertBuffer[0] ) {
9981 buffer = stream_.deviceBuffer;
9982 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9983 format = stream_.deviceFormat[0];
9984 }
9985 else {
9986 buffer = stream_.userBuffer[0];
9987 samples = stream_.bufferSize * stream_.nUserChannels[0];
9988 format = stream_.userFormat;
9989 }
9990
9991 memset( buffer, 0, samples * formatBytes(format) );
9992 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9993 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9994 if ( result == -1 ) {
9995 errorText_ = "RtApiOss::stopStream: audio write error.";
9996 error( RtAudioError::WARNING );
9997 }
9998 }
9999
10000 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
10001 if ( result == -1 ) {
10002 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
10003 errorText_ = errorStream_.str();
10004 goto unlock;
10005 }
10006 handle->triggered = false;
10007 }
10008
10009 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
10010 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
10011 if ( result == -1 ) {
10012 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
10013 errorText_ = errorStream_.str();
10014 goto unlock;
10015 }
10016 }
10017
10018 unlock:
10019 stream_.state = STREAM_STOPPED;
10020 MUTEX_UNLOCK( &stream_.mutex );
10021
10022 if ( result != -1 ) return;
10023 error( RtAudioError::SYSTEM_ERROR );
10024 }
10025
10026 void RtApiOss :: abortStream()
10027 {
10028 verifyStream();
10029 if ( stream_.state == STREAM_STOPPED ) {
10030 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
10031 error( RtAudioError::WARNING );
10032 return;
10033 }
10034
10035 MUTEX_LOCK( &stream_.mutex );
10036
10037 // The state might change while waiting on a mutex.
10038 if ( stream_.state == STREAM_STOPPED ) {
10039 MUTEX_UNLOCK( &stream_.mutex );
10040 return;
10041 }
10042
10043 int result = 0;
10044 OssHandle *handle = (OssHandle *) stream_.apiHandle;
10045 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
10046 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
10047 if ( result == -1 ) {
10048 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
10049 errorText_ = errorStream_.str();
10050 goto unlock;
10051 }
10052 handle->triggered = false;
10053 }
10054
10055 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
10056 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
10057 if ( result == -1 ) {
10058 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
10059 errorText_ = errorStream_.str();
10060 goto unlock;
10061 }
10062 }
10063
10064 unlock:
10065 stream_.state = STREAM_STOPPED;
10066 MUTEX_UNLOCK( &stream_.mutex );
10067
10068 if ( result != -1 ) return;
10069 error( RtAudioError::SYSTEM_ERROR );
10070 }
10071
void RtApiOss :: callbackEvent()
{
  // One iteration of the audio callback loop: wait while stopped, invoke
  // the user callback, then write output to and/or read input from the
  // OSS device, converting formats as required.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream() signals the condition variable.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any under/overflow flags recorded by earlier
  // failed writes/reads: xrun[0] = output underflow, xrun[1] = input overflow.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return value of 2 requests an immediate abort (drop queued audio).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    // First write on a duplex stream: pre-load the output while triggering
    // is disabled, then enable input+output together so both directions
    // start in sync.
    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream clock; a callback return of 1 requests a graceful
  // stop (drain queued output).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
10203
10204 static void *ossCallbackHandler( void *ptr )
10205 {
10206 CallbackInfo *info = (CallbackInfo *) ptr;
10207 RtApiOss *object = (RtApiOss *) info->object;
10208 bool *isRunning = &info->isRunning;
10209
10210 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
10211 if (info->doRealtime) {
10212 std::cerr << "RtAudio oss: " <<
10213 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
10214 "running realtime scheduling" << std::endl;
10215 }
10216 #endif
10217
10218 while ( *isRunning == true ) {
10219 pthread_testcancel();
10220 object->callbackEvent();
10221 }
10222
10223 pthread_exit( NULL );
10224 }
10225
10226 //******************** End of __LINUX_OSS__ *********************//
10227 #endif
10228
10229
10230 // *************************************************** //
10231 //
10232 // Protected common (OS-independent) RtAudio methods.
10233 //
10234 // *************************************************** //
10235
10236 // This method can be modified to control the behavior of error
10237 // message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatch: hand errorText_ to the user's error callback if
  // one is registered; otherwise print warnings to stderr and throw for
  // anything more severe.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // firstErrorOccurred_ guards against re-entrant calls from abortStream()
    // below overwriting or re-reporting the original error.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now: abortStream() may clobber errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
10267
10268 void RtApi :: verifyStream()
10269 {
10270 if ( stream_.state == STREAM_CLOSED ) {
10271 errorText_ = "RtApi:: a stream is not open!";
10272 error( RtAudioError::INVALID_USE );
10273 }
10274 }
10275
10276 void RtApi :: clearStreamInfo()
10277 {
10278 stream_.mode = UNINITIALIZED;
10279 stream_.state = STREAM_CLOSED;
10280 stream_.sampleRate = 0;
10281 stream_.bufferSize = 0;
10282 stream_.nBuffers = 0;
10283 stream_.userFormat = 0;
10284 stream_.userInterleaved = true;
10285 stream_.streamTime = 0.0;
10286 stream_.apiHandle = 0;
10287 stream_.deviceBuffer = 0;
10288 stream_.callbackInfo.callback = 0;
10289 stream_.callbackInfo.userData = 0;
10290 stream_.callbackInfo.isRunning = false;
10291 stream_.callbackInfo.errorCallback = 0;
10292 for ( int i=0; i<2; i++ ) {
10293 stream_.device[i] = 11111;
10294 stream_.doConvertBuffer[i] = false;
10295 stream_.deviceInterleaved[i] = true;
10296 stream_.doByteSwap[i] = false;
10297 stream_.nUserChannels[i] = 0;
10298 stream_.nDeviceChannels[i] = 0;
10299 stream_.channelOffset[i] = 0;
10300 stream_.deviceFormat[i] = 0;
10301 stream_.latency[i] = 0;
10302 stream_.userBuffer[i] = 0;
10303 stream_.convertInfo[i].channels = 0;
10304 stream_.convertInfo[i].inJump = 0;
10305 stream_.convertInfo[i].outJump = 0;
10306 stream_.convertInfo[i].inFormat = 0;
10307 stream_.convertInfo[i].outFormat = 0;
10308 stream_.convertInfo[i].inOffset.clear();
10309 stream_.convertInfo[i].outOffset.clear();
10310 }
10311 }
10312
10313 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10314 {
10315 if ( format == RTAUDIO_SINT16 )
10316 return 2;
10317 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10318 return 4;
10319 else if ( format == RTAUDIO_FLOAT64 )
10320 return 8;
10321 else if ( format == RTAUDIO_SINT24 )
10322 return 3;
10323 else if ( format == RTAUDIO_SINT8 )
10324 return 1;
10325
10326 errorText_ = "RtApi::formatBytes: undefined format.";
10327 error( RtAudioError::WARNING );
10328
10329 return 0;
10330 }
10331
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Fill in stream_.convertInfo[mode] with the channel counts, formats,
  // jumps, and per-channel offsets used by convertBuffer() to translate
  // between the user buffer and the device buffer for this direction.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have in common.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    // Interleaving differs: the non-interleaved side uses per-channel
    // planes (offset k * bufferSize, jump 1); the interleaved side uses
    // per-frame offsets (offset k).
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both sides non-interleaved: planar layout on both ends.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // Shift the device-side offsets so the stream begins at firstChannel:
  // by channel index when interleaved, by whole planes when planar.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
10411
10412 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10413 {
10414 // This function does format conversion, input/output channel compensation, and
10415 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10416 // the lower three bytes of a 32-bit integer.
10417
10418 // Clear our duplex device output buffer if there are more device outputs than user outputs
10419 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX && info.outJump > info.inJump )
10420 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10421
10422 int j;
10423 if (info.outFormat == RTAUDIO_FLOAT64) {
10424 Float64 *out = (Float64 *)outBuffer;
10425
10426 if (info.inFormat == RTAUDIO_SINT8) {
10427 signed char *in = (signed char *)inBuffer;
10428 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10429 for (j=0; j<info.channels; j++) {
10430 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 128.0;
10431 }
10432 in += info.inJump;
10433 out += info.outJump;
10434 }
10435 }
10436 else if (info.inFormat == RTAUDIO_SINT16) {
10437 Int16 *in = (Int16 *)inBuffer;
10438 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10439 for (j=0; j<info.channels; j++) {
10440 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 32768.0;
10441 }
10442 in += info.inJump;
10443 out += info.outJump;
10444 }
10445 }
10446 else if (info.inFormat == RTAUDIO_SINT24) {
10447 Int24 *in = (Int24 *)inBuffer;
10448 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10449 for (j=0; j<info.channels; j++) {
10450 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]].asInt() / 8388608.0;
10451 }
10452 in += info.inJump;
10453 out += info.outJump;
10454 }
10455 }
10456 else if (info.inFormat == RTAUDIO_SINT32) {
10457 Int32 *in = (Int32 *)inBuffer;
10458 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10459 for (j=0; j<info.channels; j++) {
10460 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 2147483648.0;
10461 }
10462 in += info.inJump;
10463 out += info.outJump;
10464 }
10465 }
10466 else if (info.inFormat == RTAUDIO_FLOAT32) {
10467 Float32 *in = (Float32 *)inBuffer;
10468 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10469 for (j=0; j<info.channels; j++) {
10470 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10471 }
10472 in += info.inJump;
10473 out += info.outJump;
10474 }
10475 }
10476 else if (info.inFormat == RTAUDIO_FLOAT64) {
10477 // Channel compensation and/or (de)interleaving only.
10478 Float64 *in = (Float64 *)inBuffer;
10479 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10480 for (j=0; j<info.channels; j++) {
10481 out[info.outOffset[j]] = in[info.inOffset[j]];
10482 }
10483 in += info.inJump;
10484 out += info.outJump;
10485 }
10486 }
10487 }
10488 else if (info.outFormat == RTAUDIO_FLOAT32) {
10489 Float32 *out = (Float32 *)outBuffer;
10490
10491 if (info.inFormat == RTAUDIO_SINT8) {
10492 signed char *in = (signed char *)inBuffer;
10493 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10494 for (j=0; j<info.channels; j++) {
10495 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 128.f;
10496 }
10497 in += info.inJump;
10498 out += info.outJump;
10499 }
10500 }
10501 else if (info.inFormat == RTAUDIO_SINT16) {
10502 Int16 *in = (Int16 *)inBuffer;
10503 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10504 for (j=0; j<info.channels; j++) {
10505 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 32768.f;
10506 }
10507 in += info.inJump;
10508 out += info.outJump;
10509 }
10510 }
10511 else if (info.inFormat == RTAUDIO_SINT24) {
10512 Int24 *in = (Int24 *)inBuffer;
10513 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10514 for (j=0; j<info.channels; j++) {
10515 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]].asInt() / 8388608.f;
10516 }
10517 in += info.inJump;
10518 out += info.outJump;
10519 }
10520 }
10521 else if (info.inFormat == RTAUDIO_SINT32) {
10522 Int32 *in = (Int32 *)inBuffer;
10523 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10524 for (j=0; j<info.channels; j++) {
10525 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 2147483648.f;
10526 }
10527 in += info.inJump;
10528 out += info.outJump;
10529 }
10530 }
10531 else if (info.inFormat == RTAUDIO_FLOAT32) {
10532 // Channel compensation and/or (de)interleaving only.
10533 Float32 *in = (Float32 *)inBuffer;
10534 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10535 for (j=0; j<info.channels; j++) {
10536 out[info.outOffset[j]] = in[info.inOffset[j]];
10537 }
10538 in += info.inJump;
10539 out += info.outJump;
10540 }
10541 }
10542 else if (info.inFormat == RTAUDIO_FLOAT64) {
10543 Float64 *in = (Float64 *)inBuffer;
10544 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10545 for (j=0; j<info.channels; j++) {
10546 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10547 }
10548 in += info.inJump;
10549 out += info.outJump;
10550 }
10551 }
10552 }
10553 else if (info.outFormat == RTAUDIO_SINT32) {
10554 Int32 *out = (Int32 *)outBuffer;
10555 if (info.inFormat == RTAUDIO_SINT8) {
10556 signed char *in = (signed char *)inBuffer;
10557 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10558 for (j=0; j<info.channels; j++) {
10559 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10560 out[info.outOffset[j]] <<= 24;
10561 }
10562 in += info.inJump;
10563 out += info.outJump;
10564 }
10565 }
10566 else if (info.inFormat == RTAUDIO_SINT16) {
10567 Int16 *in = (Int16 *)inBuffer;
10568 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10569 for (j=0; j<info.channels; j++) {
10570 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10571 out[info.outOffset[j]] <<= 16;
10572 }
10573 in += info.inJump;
10574 out += info.outJump;
10575 }
10576 }
10577 else if (info.inFormat == RTAUDIO_SINT24) {
10578 Int24 *in = (Int24 *)inBuffer;
10579 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10580 for (j=0; j<info.channels; j++) {
10581 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10582 out[info.outOffset[j]] <<= 8;
10583 }
10584 in += info.inJump;
10585 out += info.outJump;
10586 }
10587 }
10588 else if (info.inFormat == RTAUDIO_SINT32) {
10589 // Channel compensation and/or (de)interleaving only.
10590 Int32 *in = (Int32 *)inBuffer;
10591 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10592 for (j=0; j<info.channels; j++) {
10593 out[info.outOffset[j]] = in[info.inOffset[j]];
10594 }
10595 in += info.inJump;
10596 out += info.outJump;
10597 }
10598 }
10599 else if (info.inFormat == RTAUDIO_FLOAT32) {
10600 Float32 *in = (Float32 *)inBuffer;
10601 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10602 for (j=0; j<info.channels; j++) {
10603 // Use llround() which returns `long long` which is guaranteed to be at least 64 bits.
10604 out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.f), 2147483647LL);
10605 }
10606 in += info.inJump;
10607 out += info.outJump;
10608 }
10609 }
10610 else if (info.inFormat == RTAUDIO_FLOAT64) {
10611 Float64 *in = (Float64 *)inBuffer;
10612 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10613 for (j=0; j<info.channels; j++) {
10614 out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.0), 2147483647LL);
10615 }
10616 in += info.inJump;
10617 out += info.outJump;
10618 }
10619 }
10620 }
10621 else if (info.outFormat == RTAUDIO_SINT24) {
10622 Int24 *out = (Int24 *)outBuffer;
10623 if (info.inFormat == RTAUDIO_SINT8) {
10624 signed char *in = (signed char *)inBuffer;
10625 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10626 for (j=0; j<info.channels; j++) {
10627 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10628 //out[info.outOffset[j]] <<= 16;
10629 }
10630 in += info.inJump;
10631 out += info.outJump;
10632 }
10633 }
10634 else if (info.inFormat == RTAUDIO_SINT16) {
10635 Int16 *in = (Int16 *)inBuffer;
10636 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10637 for (j=0; j<info.channels; j++) {
10638 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10639 //out[info.outOffset[j]] <<= 8;
10640 }
10641 in += info.inJump;
10642 out += info.outJump;
10643 }
10644 }
10645 else if (info.inFormat == RTAUDIO_SINT24) {
10646 // Channel compensation and/or (de)interleaving only.
10647 Int24 *in = (Int24 *)inBuffer;
10648 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10649 for (j=0; j<info.channels; j++) {
10650 out[info.outOffset[j]] = in[info.inOffset[j]];
10651 }
10652 in += info.inJump;
10653 out += info.outJump;
10654 }
10655 }
10656 else if (info.inFormat == RTAUDIO_SINT32) {
10657 Int32 *in = (Int32 *)inBuffer;
10658 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10659 for (j=0; j<info.channels; j++) {
10660 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10661 //out[info.outOffset[j]] >>= 8;
10662 }
10663 in += info.inJump;
10664 out += info.outJump;
10665 }
10666 }
10667 else if (info.inFormat == RTAUDIO_FLOAT32) {
10668 Float32 *in = (Float32 *)inBuffer;
10669 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10670 for (j=0; j<info.channels; j++) {
10671 out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.f), 8388607LL);
10672 }
10673 in += info.inJump;
10674 out += info.outJump;
10675 }
10676 }
10677 else if (info.inFormat == RTAUDIO_FLOAT64) {
10678 Float64 *in = (Float64 *)inBuffer;
10679 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10680 for (j=0; j<info.channels; j++) {
10681 out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.0), 8388607LL);
10682 }
10683 in += info.inJump;
10684 out += info.outJump;
10685 }
10686 }
10687 }
10688 else if (info.outFormat == RTAUDIO_SINT16) {
10689 Int16 *out = (Int16 *)outBuffer;
10690 if (info.inFormat == RTAUDIO_SINT8) {
10691 signed char *in = (signed char *)inBuffer;
10692 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10693 for (j=0; j<info.channels; j++) {
10694 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10695 out[info.outOffset[j]] <<= 8;
10696 }
10697 in += info.inJump;
10698 out += info.outJump;
10699 }
10700 }
10701 else if (info.inFormat == RTAUDIO_SINT16) {
10702 // Channel compensation and/or (de)interleaving only.
10703 Int16 *in = (Int16 *)inBuffer;
10704 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10705 for (j=0; j<info.channels; j++) {
10706 out[info.outOffset[j]] = in[info.inOffset[j]];
10707 }
10708 in += info.inJump;
10709 out += info.outJump;
10710 }
10711 }
10712 else if (info.inFormat == RTAUDIO_SINT24) {
10713 Int24 *in = (Int24 *)inBuffer;
10714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10715 for (j=0; j<info.channels; j++) {
10716 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10717 }
10718 in += info.inJump;
10719 out += info.outJump;
10720 }
10721 }
10722 else if (info.inFormat == RTAUDIO_SINT32) {
10723 Int32 *in = (Int32 *)inBuffer;
10724 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10725 for (j=0; j<info.channels; j++) {
10726 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10727 }
10728 in += info.inJump;
10729 out += info.outJump;
10730 }
10731 }
10732 else if (info.inFormat == RTAUDIO_FLOAT32) {
10733 Float32 *in = (Float32 *)inBuffer;
10734 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10735 for (j=0; j<info.channels; j++) {
10736 out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.f), 32767LL);
10737 }
10738 in += info.inJump;
10739 out += info.outJump;
10740 }
10741 }
10742 else if (info.inFormat == RTAUDIO_FLOAT64) {
10743 Float64 *in = (Float64 *)inBuffer;
10744 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10745 for (j=0; j<info.channels; j++) {
10746 out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.0), 32767LL);
10747 }
10748 in += info.inJump;
10749 out += info.outJump;
10750 }
10751 }
10752 }
10753 else if (info.outFormat == RTAUDIO_SINT8) {
10754 signed char *out = (signed char *)outBuffer;
10755 if (info.inFormat == RTAUDIO_SINT8) {
10756 // Channel compensation and/or (de)interleaving only.
10757 signed char *in = (signed char *)inBuffer;
10758 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10759 for (j=0; j<info.channels; j++) {
10760 out[info.outOffset[j]] = in[info.inOffset[j]];
10761 }
10762 in += info.inJump;
10763 out += info.outJump;
10764 }
10765 }
10766 if (info.inFormat == RTAUDIO_SINT16) {
10767 Int16 *in = (Int16 *)inBuffer;
10768 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10769 for (j=0; j<info.channels; j++) {
10770 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10771 }
10772 in += info.inJump;
10773 out += info.outJump;
10774 }
10775 }
10776 else if (info.inFormat == RTAUDIO_SINT24) {
10777 Int24 *in = (Int24 *)inBuffer;
10778 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10779 for (j=0; j<info.channels; j++) {
10780 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10781 }
10782 in += info.inJump;
10783 out += info.outJump;
10784 }
10785 }
10786 else if (info.inFormat == RTAUDIO_SINT32) {
10787 Int32 *in = (Int32 *)inBuffer;
10788 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10789 for (j=0; j<info.channels; j++) {
10790 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10791 }
10792 in += info.inJump;
10793 out += info.outJump;
10794 }
10795 }
10796 else if (info.inFormat == RTAUDIO_FLOAT32) {
10797 Float32 *in = (Float32 *)inBuffer;
10798 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10799 for (j=0; j<info.channels; j++) {
10800 out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.f), 127LL);
10801 }
10802 in += info.inJump;
10803 out += info.outJump;
10804 }
10805 }
10806 else if (info.inFormat == RTAUDIO_FLOAT64) {
10807 Float64 *in = (Float64 *)inBuffer;
10808 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10809 for (j=0; j<info.channels; j++) {
10810 out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.0), 127LL);
10811 }
10812 in += info.inJump;
10813 out += info.outJump;
10814 }
10815 }
10816 }
10817 }
10818
10819 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10820 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10821 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10822
10823 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10824 {
10825 char val;
10826 char *ptr;
10827
10828 ptr = buffer;
10829 if ( format == RTAUDIO_SINT16 ) {
10830 for ( unsigned int i=0; i<samples; i++ ) {
10831 // Swap 1st and 2nd bytes.
10832 val = *(ptr);
10833 *(ptr) = *(ptr+1);
10834 *(ptr+1) = val;
10835
10836 // Increment 2 bytes.
10837 ptr += 2;
10838 }
10839 }
10840 else if ( format == RTAUDIO_SINT32 ||
10841 format == RTAUDIO_FLOAT32 ) {
10842 for ( unsigned int i=0; i<samples; i++ ) {
10843 // Swap 1st and 4th bytes.
10844 val = *(ptr);
10845 *(ptr) = *(ptr+3);
10846 *(ptr+3) = val;
10847
10848 // Swap 2nd and 3rd bytes.
10849 ptr += 1;
10850 val = *(ptr);
10851 *(ptr) = *(ptr+1);
10852 *(ptr+1) = val;
10853
10854 // Increment 3 more bytes.
10855 ptr += 3;
10856 }
10857 }
10858 else if ( format == RTAUDIO_SINT24 ) {
10859 for ( unsigned int i=0; i<samples; i++ ) {
10860 // Swap 1st and 3rd bytes.
10861 val = *(ptr);
10862 *(ptr) = *(ptr+2);
10863 *(ptr+2) = val;
10864
10865 // Increment 2 more bytes.
10866 ptr += 2;
10867 }
10868 }
10869 else if ( format == RTAUDIO_FLOAT64 ) {
10870 for ( unsigned int i=0; i<samples; i++ ) {
10871 // Swap 1st and 8th bytes
10872 val = *(ptr);
10873 *(ptr) = *(ptr+7);
10874 *(ptr+7) = val;
10875
10876 // Swap 2nd and 7th bytes
10877 ptr += 1;
10878 val = *(ptr);
10879 *(ptr) = *(ptr+5);
10880 *(ptr+5) = val;
10881
10882 // Swap 3rd and 6th bytes
10883 ptr += 1;
10884 val = *(ptr);
10885 *(ptr) = *(ptr+3);
10886 *(ptr+3) = val;
10887
10888 // Swap 4th and 5th bytes
10889 ptr += 1;
10890 val = *(ptr);
10891 *(ptr) = *(ptr+1);
10892 *(ptr+1) = val;
10893
10894 // Increment 5 more bytes.
10895 ptr += 5;
10896 }
10897 }
10898 }
10899
10900 // Indentation settings for Vim and Emacs
10901 //
10902 // Local Variables:
10903 // c-basic-offset: 2
10904 // indent-tabs-mode: nil
10905 // End:
10906 //
10907 // vim: et sts=2 sw=2
10908