1 /**********************************************************************
2 
3   Audacity: A Digital Audio Editor
4 
5   AudioIO.cpp
6 
7   Copyright 2000-2004:
8   Dominic Mazzoni
9   Joshua Haberman
10   Markus Meyer
11   Matt Brubeck
12 
13   This program is free software; you can redistribute it and/or modify it
14   under the terms of the GNU General Public License as published by the Free
15   Software Foundation; either version 2 of the License, or (at your option)
16   any later version.
17 
18 ********************************************************************//**
19 
20 \class AudioIoCallback
21 \brief AudioIoCallback is a class that implements the callback required
22 by PortAudio.  The callback must be responsive, must not touch the GUI, and
23 copies data into and out of the sound card buffers.  It also sends data
24 to the meters.
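
To fix ideas, here is a minimal sketch of the shape a PortAudio callback takes
(illustrative only: it is not Audacity's audacityAudioCallback, the name
exampleCallback is hypothetical, and it assumes interleaved paFloat32 stereo
output):

\code
int exampleCallback(const void *inputBuffer, void *outputBuffer,
                    unsigned long framesPerBuffer,
                    const PaStreamCallbackTimeInfo *timeInfo,
                    PaStreamCallbackFlags statusFlags, void *userData)
{
   (void)inputBuffer; (void)timeInfo; (void)statusFlags; (void)userData;
   float *out = static_cast<float*>(outputBuffer);
   // Fill every frame of both channels; a real callback copies from ring buffers.
   for (unsigned long i = 0; i < framesPerBuffer * 2; ++i)
      out[i] = 0.0f;          // emit silence
   return paContinue;         // keep the stream running
}
\endcode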
25 
26 
27 *//*****************************************************************//**
28 
29 \class AudioIO
30 \brief AudioIO uses the PortAudio library to play and record sound.
31 
32   Great care and attention to detail are necessary for understanding and
33   modifying this system.  The code in this file is run from three
34   different thread contexts: the UI thread, the disk thread (which
35   this file creates and maintains; in the code, this is called the
36   Audio Thread), and the PortAudio callback thread.
37   To highlight this delineation, the file is divided into three parts
38   based on what thread context each function is intended to run in.
39 
40   \todo run through all functions called from audio and portaudio threads
41   to verify they are thread-safe. Note that synchronization of the style:
42   "A sets flag to signal B, B clears flag to acknowledge completion"
43   is not thread safe in a general multiple-CPU context. For example,
44   B can write to a buffer and set a completion flag. The flag write can
45   occur before the buffer write due to out-of-order execution. Then A
46   can see the flag and read the buffer before buffer writes complete.
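
  One standard remedy is to publish the buffer through an atomic flag with
  release/acquire ordering, so the buffer writes happen-before the flag becomes
  visible.  A minimal sketch (FillBuffer, ReadBuffer and N are placeholders,
  not names used in this file):

\code
std::atomic<bool> flag{ false };
float buffer[N];

// thread B (producer)
FillBuffer(buffer);                            // plain writes to the buffer
flag.store(true, std::memory_order_release);   // publish those writes

// thread A (consumer)
if (flag.load(std::memory_order_acquire))      // synchronizes-with the store
   ReadBuffer(buffer);                         // sees the completed buffer writes
\endcode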
47 
48 *//****************************************************************//**
49 
50 \class AudioThread
51 \brief Defined differently on Mac and on other platforms (on Mac it does not
52 use the wxWidgets wxThread class), this class sits in a thread loop reading
53 and writing audio.
54 
55 *//****************************************************************//**
56 
57 \class AudioIOListener
58 \brief Monitors recording and playback start/stop and new sample blocks.
59 Has callbacks for these events.
60 
61 *//****************************************************************//**
62 
63 \class AudioIOStartStreamOptions
64 \brief struct holding stream options, including a pointer to the
65 time warp info, the AudioIOListener, and whether the playback is looped.
66 
67 *//*******************************************************************/
68 
69 
70 #include "AudioIO.h"
71 
72 
73 
74 #include "AudioIOExt.h"
75 #include "AudioIOListener.h"
76 
77 #include "float_cast.h"
78 #include "DeviceManager.h"
79 
80 #include <cfloat>
81 #include <math.h>
82 #include <stdlib.h>
83 #include <algorithm>
84 #include <numeric>
85 
86 #ifdef __WXMSW__
87 #include <malloc.h>
88 #endif
89 
90 #ifdef HAVE_ALLOCA_H
91 #include <alloca.h>
92 #endif
93 
94 #include "portaudio.h"
95 
96 #if USE_PORTMIXER
97 #include "portmixer.h"
98 #endif
99 
100 #include <wx/app.h>
101 #include <wx/frame.h>
102 #include <wx/wxcrtvararg.h>
103 #include <wx/log.h>
104 #include <wx/textctrl.h>
105 #include <wx/timer.h>
106 #include <wx/intl.h>
107 #include <wx/debug.h>
108 
109 #if defined(__WXMAC__) || defined(__WXMSW__)
110 #include <wx/power.h>
111 #endif
112 
113 #include "Meter.h"
114 #include "Mix.h"
115 #include "Resample.h"
116 #include "RingBuffer.h"
117 #include "Decibels.h"
118 #include "Prefs.h"
119 #include "Project.h"
120 #include "DBConnection.h"
121 #include "ProjectFileIO.h"
122 #include "ProjectWindows.h"
123 #include "WaveTrack.h"
124 
125 #include "effects/RealtimeEffectManager.h"
126 #include "QualitySettings.h"
127 #include "widgets/AudacityMessageBox.h"
128 #include "BasicUI.h"
129 
130 #include "Gain.h"
131 
132 #ifdef EXPERIMENTAL_AUTOMATED_INPUT_LEVEL_ADJUSTMENT
133    #define LOWER_BOUND 0.0
134    #define UPPER_BOUND 1.0
135 #endif
136 
137 using std::max;
138 using std::min;
139 
140 AudioIO *AudioIO::Get()
141 {
142    return static_cast< AudioIO* >( AudioIOBase::Get() );
143 }
144 
145 wxDEFINE_EVENT(EVT_AUDIOIO_PLAYBACK, wxCommandEvent);
146 wxDEFINE_EVENT(EVT_AUDIOIO_CAPTURE, wxCommandEvent);
147 wxDEFINE_EVENT(EVT_AUDIOIO_MONITOR, wxCommandEvent);
148 
149 // static
150 int AudioIoCallback::mNextStreamToken = 0;
151 double AudioIoCallback::mCachedBestRateOut;
152 bool AudioIoCallback::mCachedBestRatePlaying;
153 bool AudioIoCallback::mCachedBestRateCapturing;
154 
155 #ifdef __WXGTK__
156    // Might #define this for a useful thing on Linux
157    #undef REALTIME_ALSA_THREAD
158 #else
159    // never on the other operating systems
160    #undef REALTIME_ALSA_THREAD
161 #endif
162 
163 #ifdef REALTIME_ALSA_THREAD
164 #include "pa_linux_alsa.h"
165 #endif
166 
167 int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
168                           unsigned long framesPerBuffer,
169                           const PaStreamCallbackTimeInfo *timeInfo,
170                           PaStreamCallbackFlags statusFlags, void *userData );
171 
172 
173 //////////////////////////////////////////////////////////////////////
174 //
175 //     class AudioThread - declaration and glue code
176 //
177 //////////////////////////////////////////////////////////////////////
178 
179 #include <thread>
180 
181 #ifdef __WXMAC__
182 
183 // On Mac OS X, it's better not to use the wxThread class.
184 // We use our own implementation based on pthreads instead.
185 
186 #include <pthread.h>
187 #include <time.h>
188 
189 class AudioThread {
190  public:
191    typedef int ExitCode;
192    AudioThread() { mDestroy = false; mThread = NULL; }
193    virtual ExitCode Entry();
194    void Create() {}
195    void Delete() {
196       mDestroy = true;
197       pthread_join(mThread, NULL);
198    }
199    bool TestDestroy() { return mDestroy; }
200    void Sleep(int ms) {
201       struct timespec spec;
202       spec.tv_sec = 0;
203       spec.tv_nsec = ms * 1000 * 1000;   // assumes ms < 1000: tv_nsec must stay below one second
204       nanosleep(&spec, NULL);
205    }
206    static void *callback(void *p) {
207       AudioThread *th = (AudioThread *)p;
208       return reinterpret_cast<void *>( th->Entry() );
209    }
210    void Run() {
211       pthread_create(&mThread, NULL, callback, this);
212    }
213  private:
214    bool mDestroy;
215    pthread_t mThread;
216 };
217 
218 #else
219 
220 // The normal wxThread-derived AudioThread class for all other
221 // platforms:
222 class AudioThread /* not final */ : public wxThread {
223  public:
224    AudioThread():wxThread(wxTHREAD_JOINABLE) {}
225    ExitCode Entry() override;
226 };
227 
228 #endif
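
// For orientation: AudioThread::Entry (declared above) is, in essence, a polling
// loop over the flags set by the UI thread, calling TrackBufferExchange to move
// samples between the ring buffers and the tracks.  A simplified sketch only,
// not the exact implementation:
//
//    AudioThread::ExitCode AudioThread::Entry()
//    {
//       while( !TestDestroy() )
//       {
//          auto gAudioIO = AudioIO::Get();
//          if( gAudioIO->mAudioThreadShouldCallTrackBufferExchangeOnce ) {
//             gAudioIO->TrackBufferExchange();
//             gAudioIO->mAudioThreadShouldCallTrackBufferExchangeOnce = false;
//          }
//          else if( gAudioIO->mAudioThreadTrackBufferExchangeLoopRunning )
//             gAudioIO->TrackBufferExchange();
//          Sleep(10);
//       }
//       return 0;
//    }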
229 
230 //////////////////////////////////////////////////////////////////////
231 //
232 //     UI Thread Context
233 //
234 //////////////////////////////////////////////////////////////////////
235 
236 void AudioIO::Init()
237 {
238    ugAudioIO.reset(safenew AudioIO());
239    Get()->mThread->Run();
240 
241    // Make sure device prefs are initialized
242    if (gPrefs->Read(wxT("AudioIO/RecordingDevice"), wxT("")).empty()) {
243       int i = getRecordDevIndex();
244       const PaDeviceInfo *info = Pa_GetDeviceInfo(i);
245       if (info) {
246          AudioIORecordingDevice.Write(DeviceName(info));
247          AudioIOHost.Write(HostName(info));
248       }
249    }
250 
251    if (gPrefs->Read(wxT("AudioIO/PlaybackDevice"), wxT("")).empty()) {
252       int i = getPlayDevIndex();
253       const PaDeviceInfo *info = Pa_GetDeviceInfo(i);
254       if (info) {
255          AudioIOPlaybackDevice.Write(DeviceName(info));
256          AudioIOHost.Write(HostName(info));
257       }
258    }
259 
260    gPrefs->Flush();
261 }
262 
263 void AudioIO::Deinit()
264 {
265    ugAudioIO.reset();
266 }
267 
268 bool AudioIO::ValidateDeviceNames(const wxString &play, const wxString &rec)
269 {
270    const PaDeviceInfo *pInfo = Pa_GetDeviceInfo(getPlayDevIndex(play));
271    const PaDeviceInfo *rInfo = Pa_GetDeviceInfo(getRecordDevIndex(rec));
272 
273    // Valid iff both are defined and use the same host API.
274    return pInfo != nullptr && rInfo != nullptr && pInfo->hostApi == rInfo->hostApi;
275 }
276 
277 AudioIO::AudioIO()
278 {
279    if (!std::atomic<double>{}.is_lock_free()) {
280       // If this check fails, then the atomic<double> members in AudioIO.h
281       // might be changed to atomic<float> to be more efficient with some
282       // loss of precision.  That could be conditionally compiled depending
283       // on the platform.
284       wxASSERT(false);
285    }
286 
287    // This ASSERT is here because of casting in the callback
288    // functions, where we cast a tempFloats buffer to a (short*) buffer.
289    // We have to ASSERT in the GUI thread, if we are to see it properly.
290    wxASSERT( sizeof( short ) <= sizeof( float ));
291 
292    mAudioThreadShouldCallTrackBufferExchangeOnce = false;
293    mAudioThreadTrackBufferExchangeLoopRunning = false;
294    mAudioThreadTrackBufferExchangeLoopActive = false;
295    mPortStreamV19 = NULL;
296 
297    mNumPauseFrames = 0;
298 
299 #ifdef EXPERIMENTAL_AUTOMATED_INPUT_LEVEL_ADJUSTMENT
300    mAILAActive = false;
301 #endif
302    mStreamToken = 0;
303 
304    mLastPaError = paNoError;
305 
306    mLastRecordingOffset = 0.0;
307    mNumCaptureChannels = 0;
308    mPaused = false;
309    mSilenceLevel = 0.0;
310 
311    mUpdateMeters = false;
312    mUpdatingMeters = false;
313 
314    mOutputMeter.reset();
315 
316    PaError err = Pa_Initialize();
317 
318    if (err != paNoError) {
319       auto errStr = XO("Could not find any audio devices.\n");
320       errStr += XO("You will not be able to play or record audio.\n\n");
321       wxString paErrStr = LAT1CTOWX(Pa_GetErrorText(err));
322       if (!paErrStr.empty())
323          errStr += XO("Error: %s").Format( paErrStr );
324       // XXX: we are in libaudacity; popping up dialogs is not allowed!  A
325       // long-term solution will probably involve exceptions.
326       AudacityMessageBox(
327          errStr,
328          XO("Error Initializing Audio"),
329          wxICON_ERROR|wxOK);
330 
331       // Since PortAudio is not initialized, all calls to PortAudio
332       // functions will fail.  This will give reasonable behavior, since
333       // the user will be able to do things not relating to audio i/o,
334       // but any attempt to play or record will simply fail.
335    }
336 
337    // Start thread
338    mThread = std::make_unique<AudioThread>();
339    mThread->Create();
340 
341 #if defined(USE_PORTMIXER)
342    mPortMixer = NULL;
343    mPreviousHWPlaythrough = -1.0;
344    HandleDeviceChange();
345 #else
346    mEmulateMixerOutputVol = true;
347    mInputMixerWorks = false;
348 #endif
349 
350    mMixerOutputVol = AudioIOPlaybackVolume.Read();
351 
352    mLastPlaybackTimeMillis = 0;
353 }
354 
355 AudioIO::~AudioIO()
356 {
357    if ( !mOwningProject.expired() )
358       // Unlikely that this will be destroyed earlier than any projects, but
359       // be prepared anyway
360       ResetOwningProject();
361 
362 #if defined(USE_PORTMIXER)
363    if (mPortMixer) {
364       #if __WXMAC__
365       if (Px_SupportsPlaythrough(mPortMixer) && mPreviousHWPlaythrough >= 0.0)
366          Px_SetPlaythrough(mPortMixer, mPreviousHWPlaythrough);
367       mPreviousHWPlaythrough = -1.0;
368       #endif
369       Px_CloseMixer(mPortMixer);
370       mPortMixer = NULL;
371    }
372 #endif
373 
374    // FIXME: ? TRAP_ERR.  Pa_Terminate probably OK if err without reporting.
375    Pa_Terminate();
376 
377    /* Delete is a "graceful" way to stop the thread.
378       (Kill is the not-graceful way.) */
379 
380    // This causes reentrancy issues during application shutdown
381    // wxTheApp->Yield();
382 
383    mThread->Delete();
384    mThread.reset();
385 }
386 
387 void AudioIO::SetMixer(int inputSource, float recordVolume,
388                        float playbackVolume)
389 {
390    mMixerOutputVol = playbackVolume;
391    AudioIOPlaybackVolume.Write(mMixerOutputVol);
392 
393 #if defined(USE_PORTMIXER)
394    PxMixer *mixer = mPortMixer;
395    if( !mixer )
396       return;
397 
398    float oldRecordVolume = Px_GetInputVolume(mixer);
399 
400    AudioIoCallback::SetMixer(inputSource);
401    if( oldRecordVolume != recordVolume )
402       Px_SetInputVolume(mixer, recordVolume);
403 
404 #endif
405 }
406 
407 void AudioIO::GetMixer(int *recordDevice, float *recordVolume,
408                        float *playbackVolume)
409 {
410    *playbackVolume = mMixerOutputVol;
411 
412 #if defined(USE_PORTMIXER)
413 
414    PxMixer *mixer = mPortMixer;
415 
416    if( mixer )
417    {
418       *recordDevice = Px_GetCurrentInputSource(mixer);
419 
420       if (mInputMixerWorks)
421          *recordVolume = Px_GetInputVolume(mixer);
422       else
423          *recordVolume = 1.0f;
424 
425       return;
426    }
427 
428 #endif
429 
430    *recordDevice = 0;
431    *recordVolume = 1.0f;
432 }
433 
434 bool AudioIO::InputMixerWorks()
435 {
436    return mInputMixerWorks;
437 }
438 
439 wxArrayString AudioIO::GetInputSourceNames()
440 {
441 #if defined(USE_PORTMIXER)
442 
443    wxArrayString deviceNames;
444 
445    if( mPortMixer )
446    {
447       int numSources = Px_GetNumInputSources(mPortMixer);
448       for( int source = 0; source < numSources; source++ )
449          deviceNames.push_back(wxString(wxSafeConvertMB2WX(Px_GetInputSourceName(mPortMixer, source))));
450    }
451    else
452    {
453       wxLogDebug(wxT("AudioIO::GetInputSourceNames(): PortMixer not initialised!"));
454    }
455 
456    return deviceNames;
457 
458 #else
459 
460    wxArrayString blank;
461 
462    return blank;
463 
464 #endif
465 }
466 
467 static PaSampleFormat AudacityToPortAudioSampleFormat(sampleFormat format)
468 {
469    switch(format) {
470    case int16Sample:
471       return paInt16;
472    case int24Sample:
473       return paInt24;
474    case floatSample:
475    default:
476       return paFloat32;
477    }
478 }
479 
480 bool AudioIO::StartPortAudioStream(const AudioIOStartStreamOptions &options,
481                                    unsigned int numPlaybackChannels,
482                                    unsigned int numCaptureChannels,
483                                    sampleFormat captureFormat)
484 {
485    auto sampleRate = options.rate;
486    mNumPauseFrames = 0;
487    SetOwningProject( options.pProject );
488    bool success = false;
489    auto cleanup = finally([&]{
490       if (!success)
491          ResetOwningProject();
492    });
493 
494    // PRL:  Protection from crash reported by David Bailes, involving starting
495    // and stopping with frequent changes of active window, hard to reproduce
496    if (mOwningProject.expired())
497       return false;
498 
499    mInputMeter.reset();
500    mOutputMeter.reset();
501 
502    mLastPaError = paNoError;
503    // pick a rate to do the audio I/O at, from those available. The project
504    // rate is suggested, but we may get something else if it isn't supported
505    mRate = GetBestRate(numCaptureChannels > 0, numPlaybackChannels > 0, sampleRate);
506 
507    // July 2016 (Carsten and Uwe)
508    // BUG 193: Tell PortAudio that the sound card will handle 24 bit (under DirectSound) using
509    // userData.
510    int captureFormat_saved = captureFormat;
511    // Special case: Our 24-bit sample format is different from PortAudio's
512    // 3-byte packed format. So just make PortAudio return float samples,
513    // since we need float values anyway to apply the gain.
514    // ANSWER-ME: So we *never* actually handle 24-bit?! This causes mCapture to
515    // be set to floatSample below.
516    // JKC: YES that's right.  Internally Audacity uses float, and float has space for
517    // 24 bits as well as exponent.  Actual 24 bit would require packing and
518    // unpacking unaligned bytes and would be inefficient.
519    // ANSWER ME: is floatSample 64 bit on 64 bit machines?
520    if (captureFormat == int24Sample)
521       captureFormat = floatSample;
522 
523    mNumPlaybackChannels = numPlaybackChannels;
524    mNumCaptureChannels = numCaptureChannels;
525 
526    bool usePlayback = false, useCapture = false;
527    PaStreamParameters playbackParameters{};
528    PaStreamParameters captureParameters{};
529 
530    auto latencyDuration = AudioIOLatencyDuration.Read();
531 
532    if( numPlaybackChannels > 0)
533    {
534       usePlayback = true;
535 
536       // this sets the device index to whatever is "right" based on preferences,
537       // then defaults
538       playbackParameters.device = getPlayDevIndex();
539 
540       const PaDeviceInfo *playbackDeviceInfo;
541       playbackDeviceInfo = Pa_GetDeviceInfo( playbackParameters.device );
542 
543       if( playbackDeviceInfo == NULL )
544          return false;
545 
546       // regardless of source formats, we always mix to float
547       playbackParameters.sampleFormat = paFloat32;
548       playbackParameters.hostApiSpecificStreamInfo = NULL;
549       playbackParameters.channelCount = mNumPlaybackChannels;
550 
551       if (mSoftwarePlaythrough)
552          playbackParameters.suggestedLatency =
553             playbackDeviceInfo->defaultLowOutputLatency;
554       else {
555          // When using WASAPI, the suggested latency does not affect
556          // the latency of the playback, but the position of playback is given as if
557          // there was the suggested latency. This results in the last "suggested latency"
558          // of a selection not being played. So for WASAPI use 0.0 for the suggested
559          // latency regardless of user setting. See bug 1949.
560          const PaHostApiInfo* hostInfo = Pa_GetHostApiInfo(playbackDeviceInfo->hostApi);
561          bool isWASAPI = (hostInfo && hostInfo->type == paWASAPI);
562          playbackParameters.suggestedLatency = isWASAPI ? 0.0 : latencyDuration/1000.0;
563       }
564 
565       mOutputMeter = options.playbackMeter;
566    }
567 
568    if( numCaptureChannels > 0)
569    {
570       useCapture = true;
571       mCaptureFormat = captureFormat;
572 
573       const PaDeviceInfo *captureDeviceInfo;
574       // retrieve the index of the device set in the prefs, or a sensible
575       // default if it isn't set/valid
576       captureParameters.device = getRecordDevIndex();
577 
578       captureDeviceInfo = Pa_GetDeviceInfo( captureParameters.device );
579 
580       if( captureDeviceInfo == NULL )
581          return false;
582 
583       captureParameters.sampleFormat =
584          AudacityToPortAudioSampleFormat(mCaptureFormat);
585 
586       captureParameters.hostApiSpecificStreamInfo = NULL;
587       captureParameters.channelCount = mNumCaptureChannels;
588 
589       if (mSoftwarePlaythrough)
590          captureParameters.suggestedLatency =
591             captureDeviceInfo->defaultHighInputLatency;
592       else
593          captureParameters.suggestedLatency = latencyDuration/1000.0;
594 
595       SetCaptureMeter( mOwningProject.lock(), options.captureMeter );
596    }
597 
598    SetMeters();
599 
600 #ifdef USE_PORTMIXER
601 #ifdef __WXMSW__
602    //mchinen nov 30 2010.  For some reason Pa_OpenStream resets the input volume on Windows,
603    //so cache it and restore it afterwards.
604    //The actual problem is likely in portaudio's pa_win_wmme.c OpenStream().
605    float oldRecordVolume = Px_GetInputVolume(mPortMixer);
606 #endif
607 #endif
608 
609    // July 2016 (Carsten and Uwe)
610    // BUG 193: Possibly tell portAudio to use 24 bit with DirectSound.
611    int  userData = 24;
612    int* lpUserData = (captureFormat_saved == int24Sample) ? &userData : NULL;
613 
614    // (Linux, bug 1885) After scanning devices it takes a little time for the
615    // ALSA device to be available, so allow retries.
616    // On my test machine, no more than 3 attempts are required.
617    unsigned int maxTries = 1;
618 #ifdef __WXGTK__
619    if (DeviceManager::Instance()->GetTimeSinceRescan() < 10)
620       maxTries = 5;
621 #endif
622 
623    for (unsigned int tries = 0; tries < maxTries; tries++) {
624       mLastPaError = Pa_OpenStream( &mPortStreamV19,
625                                     useCapture ? &captureParameters : NULL,
626                                     usePlayback ? &playbackParameters : NULL,
627                                     mRate, paFramesPerBufferUnspecified,
628                                     paNoFlag,
629                                     audacityAudioCallback, lpUserData );
630       if (mLastPaError == paNoError) {
631          break;
632       }
633       wxLogDebug("Attempt %u to open capture stream failed with: %d", 1 + tries, mLastPaError);
634       wxMilliSleep(1000);
635    }
636 
637 
638 #if USE_PORTMIXER
639 #ifdef __WXMSW__
640    Px_SetInputVolume(mPortMixer, oldRecordVolume);
641 #endif
642    if (mPortStreamV19 != NULL && mLastPaError == paNoError) {
643 
644       #ifdef __WXMAC__
645       if (mPortMixer) {
646          if (Px_SupportsPlaythrough(mPortMixer)) {
647             bool playthrough = false;
648 
649             mPreviousHWPlaythrough = Px_GetPlaythrough(mPortMixer);
650 
651             // Bug 388.  Feature not supported.
652             //gPrefs->Read(wxT("/AudioIO/Playthrough"), &playthrough, false);
653             if (playthrough)
654                Px_SetPlaythrough(mPortMixer, 1.0);
655             else
656                Px_SetPlaythrough(mPortMixer, 0.0);
657          }
658       }
659       #endif
660    }
661 #endif
662 
663 #if (defined(__WXMAC__) || defined(__WXMSW__)) && wxCHECK_VERSION(3,1,0)
664    // Don't want the system to sleep while audio I/O is active
665    if (mPortStreamV19 != NULL && mLastPaError == paNoError) {
666       wxPowerResource::Acquire(wxPOWER_RESOURCE_SCREEN, _("Audacity Audio"));
667    }
668 #endif
669 
670    return (success = (mLastPaError == paNoError));
671 }
672 
673 wxString AudioIO::LastPaErrorString()
674 {
675    return wxString::Format(wxT("%d %s."), (int) mLastPaError, Pa_GetErrorText(mLastPaError));
676 }
677 
678 void AudioIO::SetOwningProject(
679    const std::shared_ptr<AudacityProject> &pProject )
680 {
681    if ( !mOwningProject.expired() ) {
682       wxASSERT(false);
683       ResetOwningProject();
684    }
685 
686    mOwningProject = pProject;
687 }
688 
689 void AudioIO::ResetOwningProject()
690 {
691    mOwningProject.reset();
692 }
693 
694 void AudioIO::StartMonitoring( const AudioIOStartStreamOptions &options )
695 {
696    if ( mPortStreamV19 || mStreamToken )
697       return;
698 
699    bool success;
700    auto captureFormat = QualitySettings::SampleFormatChoice();
701    auto captureChannels = AudioIORecordChannels.Read();
702    gPrefs->Read(wxT("/AudioIO/SWPlaythrough"), &mSoftwarePlaythrough, false);
703    int playbackChannels = 0;
704 
705    if (mSoftwarePlaythrough)
706       playbackChannels = 2;
707 
708    // FIXME: TRAP_ERR StartPortAudioStream (a PaError may be present)
709    // but StartPortAudioStream function only returns true or false.
710    mUsingAlsa = false;
711    success = StartPortAudioStream(options, (unsigned int)playbackChannels,
712                                   (unsigned int)captureChannels,
713                                   captureFormat);
714 
715    auto pOwningProject = mOwningProject.lock();
716    if (!success) {
717       using namespace BasicUI;
718       auto msg = XO("Error opening recording device.\nError code: %s")
719          .Format( Get()->LastPaErrorString() );
720       ShowErrorDialog( *ProjectFramePlacement( pOwningProject.get() ),
721          XO("Error"), msg, wxT("Error_opening_sound_device"),
722          ErrorDialogOptions{ ErrorDialogType::ModalErrorReport } );
723       return;
724    }
725 
726    wxCommandEvent e(EVT_AUDIOIO_MONITOR);
727    e.SetEventObject( pOwningProject.get() );
728    e.SetInt(true);
729    wxTheApp->ProcessEvent(e);
730 
731    // FIXME: TRAP_ERR PaErrorCode 'noted' but not reported in StartMonitoring.
732    // Now start the PortAudio stream!
733    // TODO: ? Factor out and reuse error reporting code from end of
734    // AudioIO::StartStream?
735    mLastPaError = Pa_StartStream( mPortStreamV19 );
736 
737    // Update UI display only now, after all possibilities for error are past.
738    auto pListener = GetListener();
739    if ((mLastPaError == paNoError) && pListener) {
740       // advertise the chosen I/O sample rate to the UI
741       pListener->OnAudioIORate((int)mRate);
742    }
743 }
744 
745 int AudioIO::StartStream(const TransportTracks &tracks,
746    double t0, double t1, double mixerLimit,
747    const AudioIOStartStreamOptions &options)
748 {
749    const auto &pStartTime = options.pStartTime;
750    t1 = std::min(t1, mixerLimit);
751 
752    mLostSamples = 0;
753    mLostCaptureIntervals.clear();
754    mDetectDropouts =
755       gPrefs->Read( WarningDialogKey(wxT("DropoutDetected")), true ) != 0;
756    auto cleanup = finally ( [this] { ClearRecordingException(); } );
757 
758    if( IsBusy() )
759       return 0;
760 
761    // We just want to set mStreamToken to -1 - decrementing it this way avoids
762    // an extremely rare but possible race condition if two callers
763    // somehow invoked StartStream at the same time...
764    mStreamToken--;
765    if (mStreamToken != -1)
766       return 0;
767 
768    // TODO: we don't really need to close and reopen stream if the
769    // format matches; however it's kind of tricky to keep it open...
770    //
771    //   if (sampleRate == mRate &&
772    //       playbackChannels == mNumPlaybackChannels &&
773    //       captureChannels == mNumCaptureChannels &&
774    //       captureFormat == mCaptureFormat) {
775 
776    if (mPortStreamV19) {
777       StopStream();
778       while(mPortStreamV19)
779          wxMilliSleep( 50 );
780    }
781 
782 #ifdef __WXGTK__
783    // Detect whether ALSA is the chosen host, and do the various involved MIDI
784    // timing compensations only then.
785    mUsingAlsa = (AudioIOHost.Read() == L"ALSA");
786 #endif
787 
788    gPrefs->Read(wxT("/AudioIO/SWPlaythrough"), &mSoftwarePlaythrough, false);
789    gPrefs->Read(wxT("/AudioIO/SoundActivatedRecord"), &mPauseRec, false);
790    gPrefs->Read(wxT("/AudioIO/Microfades"), &mbMicroFades, false);
791    int silenceLevelDB;
792    gPrefs->Read(wxT("/AudioIO/SilenceLevel"), &silenceLevelDB, -50);
793    int dBRange = DecibelScaleCutoff.Read();
794    if(silenceLevelDB < -dBRange)
795    {
796       silenceLevelDB = -dBRange + 3;
797       // The meter range was made smaller than SilenceLevel,
798       // so set SilenceLevel to a reasonable value.
799 
800       // PRL:  update prefs, or correct it only in-session?
801       // The behavior (as of 2.3.1) was the latter, the code suggested that
802       // the intent was the former;  I preserve the behavior, but uncomment
803       // this if you disagree.
804       // gPrefs->Write(wxT("/AudioIO/SilenceLevel"), silenceLevelDB);
805       // gPrefs->Flush();
806    }
807    mSilenceLevel = DB_TO_LINEAR(silenceLevelDB);  // meter goes -dBRange dB -> 0dB
808 
809    // Clamp pre-roll so we don't play before time 0
810    const auto preRoll = std::max(0.0, std::min(t0, options.preRoll));
811    mRecordingSchedule = {};
812    mRecordingSchedule.mPreRoll = preRoll;
813    mRecordingSchedule.mLatencyCorrection =
814       AudioIOLatencyCorrection.Read() / 1000.0;
815    mRecordingSchedule.mDuration = t1 - t0;
816    if (options.pCrossfadeData)
817       mRecordingSchedule.mCrossfadeData.swap( *options.pCrossfadeData );
818 
819    mListener = options.listener;
820    mRate    = options.rate;
821 
822    mSeek    = 0;
823    mLastRecordingOffset = 0;
824    mCaptureTracks = tracks.captureTracks;
825    mPlaybackTracks = tracks.playbackTracks;
826 
827    bool commit = false;
828    auto cleanupTracks = finally([&]{
829       if (!commit) {
830          // Don't keep unnecessary shared pointers to tracks
831          mPlaybackTracks.clear();
832          mCaptureTracks.clear();
833          for(auto &ext : Extensions())
834             ext.AbortOtherStream();
835 
836          // Don't cause a busy wait in the audio thread after stopping scrubbing
837          mPlaybackSchedule.ResetMode();
838       }
839    });
840 
841    mPlaybackBuffers.reset();
842    mPlaybackMixers.clear();
843    mCaptureBuffers.reset();
844    mResample.reset();
845    mPlaybackSchedule.mTimeQueue.Clear();
846 
847    mPlaybackSchedule.Init(
848       t0, t1, options, mCaptureTracks.empty() ? nullptr : &mRecordingSchedule );
849 
850    unsigned int playbackChannels = 0;
851    unsigned int captureChannels = 0;
852    sampleFormat captureFormat = floatSample;
853 
854    auto pListener = GetListener();
855 
856    if (tracks.playbackTracks.size() > 0
857       || tracks.otherPlayableTracks.size() > 0)
858       playbackChannels = 2;
859 
860    if (mSoftwarePlaythrough)
861       playbackChannels = 2;
862 
863    if (tracks.captureTracks.size() > 0)
864    {
865       // For capture, every input channel gets its own track
866       captureChannels = mCaptureTracks.size();
867       // I don't deal with the possibility of the capture tracks
868       // having different sample formats, since it will never happen
869       // with the current code.  This code wouldn't *break* if this
870       // assumption was false, but it would be sub-optimal.  For example,
871       // if the first track was 16-bit and the second track was 24-bit,
872       // we would set the sound card to capture in 16 bits and the second
873       // track wouldn't get the benefit of all 24 bits the card is capable
874       // of.
875       captureFormat = mCaptureTracks[0]->GetSampleFormat();
876 
877       // Tell project that we are about to start recording
878       if (pListener)
879          pListener->OnAudioIOStartRecording();
880    }
881 
882    bool successAudio;
883 
884    successAudio = StartPortAudioStream(options, playbackChannels,
885                                        captureChannels, captureFormat);
886 
887    // Call this only after reassignment of mRate that might happen in the
888    // previous call.
889    mPlaybackSchedule.GetPolicy().Initialize( mPlaybackSchedule, mRate );
890 
891 #ifdef EXPERIMENTAL_MIDI_OUT
892    auto range = Extensions();
893    successAudio = successAudio &&
894       std::all_of(range.begin(), range.end(),
895          [this, &tracks, t0](auto &ext){
896             return ext.StartOtherStream( tracks,
897               (mPortStreamV19 != NULL && mLastPaError == paNoError)
898                  ? Pa_GetStreamInfo(mPortStreamV19) : nullptr,
899               t0, mRate ); });
900 #endif
901 
902    if (!successAudio) {
903       if (pListener && captureChannels > 0)
904          pListener->OnAudioIOStopRecording();
905       mStreamToken = 0;
906 
907       return 0;
908    }
909 
910    {
911       double mixerStart = t0;
912       if (pStartTime)
913          mixerStart = std::min( mixerStart, *pStartTime );
914       if ( ! AllocateBuffers( options, tracks,
915          mixerStart, mixerLimit, options.rate ) )
916          return 0;
917    }
918 
919    if (mNumPlaybackChannels > 0)
920    {
921       auto & em = RealtimeEffectManager::Get();
922       // Setup for realtime playback at the rate of the realtime
923       // stream, not the rate of the track.
924       em.RealtimeInitialize(mRate);
925 
926       // The following adds a NEW effect processor for each logical track and the
927       // group determination should mimic what is done in audacityAudioCallback()
928       // when calling RealtimeProcess().
929       int group = 0;
930       for (size_t i = 0, cnt = mPlaybackTracks.size(); i < cnt;)
931       {
932          const WaveTrack *vt = mPlaybackTracks[i].get();
933 
934          // TODO: more-than-two-channels
935          unsigned chanCnt = TrackList::Channels(vt).size();
936          i += chanCnt;
937 
938          // Setup for realtime playback at the rate of the realtime
939          // stream, not the rate of the track.
940          em.RealtimeAddProcessor(group++, std::min(2u, chanCnt), mRate);
941       }
942    }
943 
944 #ifdef EXPERIMENTAL_AUTOMATED_INPUT_LEVEL_ADJUSTMENT
945    AILASetStartTime();
946 #endif
947 
948    if (pStartTime)
949    {
950       // Calculate the NEW time position
951       const auto time = *pStartTime;
952 
953       // Main thread's initialization of mTime
954       mPlaybackSchedule.SetTrackTime( time );
955       mPlaybackSchedule.GetPolicy().OffsetTrackTime( mPlaybackSchedule, 0 );
956 
957       // Reset mixer positions for all playback tracks
958       unsigned numMixers = mPlaybackTracks.size();
959       for (unsigned ii = 0; ii < numMixers; ++ii)
960          mPlaybackMixers[ii]->Reposition( time );
961    }
962 
963    // Now that we are done with AllocateBuffers() and SetTrackTime():
964    mPlaybackSchedule.mTimeQueue.Prime(mPlaybackSchedule.GetTrackTime());
965    // else recording only without overdub
966 
967    // We signal the audio thread to call TrackBufferExchange, to prime the RingBuffers
968    // so that they will have data in them when the stream starts.  Having the
969    // audio thread call TrackBufferExchange here makes the code more predictable, since
970    // TrackBufferExchange will ALWAYS get called from the Audio thread.
971    mAudioThreadShouldCallTrackBufferExchangeOnce = true;
972 
973    while( mAudioThreadShouldCallTrackBufferExchangeOnce ) {
974       auto interval = 50ull;
975       if (options.playbackStreamPrimer) {
976          interval = options.playbackStreamPrimer();
977       }
978       wxMilliSleep( interval );
979    }
980 
981    if(mNumPlaybackChannels > 0 || mNumCaptureChannels > 0) {
982 
983 #ifdef REALTIME_ALSA_THREAD
984       // PRL: Do this in hope of less thread scheduling jitter in calls to
985       // audacityAudioCallback.
986       // Not needed to make audio playback work smoothly.
987       // But needed in case we also play MIDI, so that the variable "offset"
988       // in AudioIO::MidiTime() is a better approximation of the duration
989       // between the call of audacityAudioCallback and the actual output of
990       // the first audio sample.
991       // (Which we should be able to determine from fields of
992       // PaStreamCallbackTimeInfo, but that seems not to work as documented with
993       // ALSA.)
994       if (mUsingAlsa)
995          // Perhaps we should do this only if also playing MIDI ?
996          PaAlsa_EnableRealtimeScheduling( mPortStreamV19, 1 );
997 #endif
998 
999       //
1000       // Generate a unique value each time, to be returned to
1001       // clients accessing the AudioIO API, so they can query if they
1002       // are the ones who have reserved AudioIO or not.
1003       //
1004       // It is important to set this before setting the portaudio stream in
1005       // motion -- otherwise it may play an unspecified number of leading
1006       // zeroes.
1007       mStreamToken = (++mNextStreamToken);
1008 
1009       // This affects the AudioThread (not the portaudio callback).
1010       // Probably not needed so urgently before portaudio thread start for usual
1011       // playback, since our ring buffers have been primed already with 4 sec
1012       // of audio, but then we might be scrubbing, so do it.
1013       mAudioThreadTrackBufferExchangeLoopRunning = true;
1014       mForceFadeOut.store(false, std::memory_order_relaxed);
1015 
1016       // Now start the PortAudio stream!
1017       PaError err;
1018       err = Pa_StartStream( mPortStreamV19 );
1019 
1020       if( err != paNoError )
1021       {
1022          mStreamToken = 0;
1023          mAudioThreadTrackBufferExchangeLoopRunning = false;
1024          if (pListener && mNumCaptureChannels > 0)
1025             pListener->OnAudioIOStopRecording();
1026          StartStreamCleanup();
1027          // PRL: PortAudio error messages are sadly not internationalized
1028          AudacityMessageBox(
1029             Verbatim( LAT1CTOWX(Pa_GetErrorText(err)) ) );
1030          return 0;
1031       }
1032    }
1033 
1034    // Update UI display only now, after all possibilities for error are past.
1035    if (pListener) {
1036       // advertise the chosen I/O sample rate to the UI
1037       pListener->OnAudioIORate((int)mRate);
1038    }
1039 
1040    auto pOwningProject = mOwningProject.lock();
1041    if (mNumPlaybackChannels > 0)
1042    {
1043       wxCommandEvent e(EVT_AUDIOIO_PLAYBACK);
1044       e.SetEventObject( pOwningProject.get() );
1045       e.SetInt(true);
1046       wxTheApp->ProcessEvent(e);
1047    }
1048 
1049    if (mNumCaptureChannels > 0)
1050    {
1051       wxCommandEvent e(EVT_AUDIOIO_CAPTURE);
1052       e.SetEventObject( pOwningProject.get() );
1053       e.SetInt(true);
1054       wxTheApp->ProcessEvent(e);
1055    }
1056 
1057    commit = true;
1058    return mStreamToken;
1059 }
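
// Typical use of StartStream/StopStream from the UI thread, in outline (a sketch
// only; real callers build the TransportTracks and AudioIOStartStreamOptions from
// project state, and the variable names here are hypothetical):
//
//    auto gAudioIO = AudioIO::Get();
//    int token = gAudioIO->StartStream( transportTracks, t0, t1, t1, options );
//    if (token == 0)
//       return;   // the stream could not be started (busy, or device open failed)
//    // ... audio flows on the PortAudio and Audio threads ...
//    gAudioIO->StopStream();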
1060 
1061 void AudioIO::DelayActions(bool recording)
1062 {
1063    mDelayingActions = recording;
1064 }
1065 
1066 bool AudioIO::DelayingActions() const
1067 {
1068    return mDelayingActions || (mPortStreamV19 && mNumCaptureChannels > 0);
1069 }
1070 
1071 void AudioIO::CallAfterRecording(PostRecordingAction action)
1072 {
1073    if (!action)
1074       return;
1075 
1076    {
1077       std::lock_guard<std::mutex> guard{ mPostRecordingActionMutex };
1078       if (mPostRecordingAction) {
1079          // Enqueue it, even if perhaps not still recording,
1080          // but it wasn't cleared yet
1081          mPostRecordingAction = [
1082             prevAction = std::move(mPostRecordingAction),
1083             nextAction = std::move(action)
1084          ]{ prevAction(); nextAction(); };
1085          return;
1086       }
1087       else if (DelayingActions()) {
1088          mPostRecordingAction = std::move(action);
1089          return;
1090       }
1091    }
1092 
1093    // Don't delay it except until idle time.
1094    // (Recording might start between now and then, but won't go far before
1095    // the action is done.  So the system isn't bulletproof yet.)
1096    wxTheApp->CallAfter(std::move(action));
1097 }
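
// Illustrative use (a sketch): defer work that must not interleave with an
// in-progress recording.  The action runs at idle time if nothing is recording,
// otherwise it is queued and runs after the recording stops:
//
//    AudioIO::Get()->CallAfterRecording( []{
//       // e.g. safely modify tracks or show UI here
//    } );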
1098 
1099 bool AudioIO::AllocateBuffers(
1100    const AudioIOStartStreamOptions &options,
1101    const TransportTracks &tracks, double t0, double t1, double sampleRate )
1102 {
1103    bool success = false;
1104    auto cleanup = finally([&]{
1105       if (!success) StartStreamCleanup( false );
1106    });
1107 
1108    auto &policy = mPlaybackSchedule.GetPolicy();
1109    auto times = policy.SuggestedBufferTimes(mPlaybackSchedule);
1110 
1111    //
1112    // The (audio) stream has been opened successfully (assuming we tried
1113    // to open it). We now proceed to
1114    // allocate the memory structures the stream will need.
1115    //
1116 
1117    //
1118    // The RingBuffer sizes, and the max amount of the buffer to
1119    // fill at a time, both grow linearly with the number of
1120    // tracks.  This allows us to scale up to many tracks without
1121    // killing performance.
1122    //
1123 
1124    // real playback time to produce with each filling of the buffers
1125    // by the Audio thread (except at the end of playback):
1126    // usually, make fillings fewer and longer for less CPU usage.
1127    // What Audio thread produces for playback is then consumed by the PortAudio
1128    // thread, in many smaller pieces.
1129    double playbackTime = lrint(times.batchSize * mRate) / mRate;
1130 
1131    wxASSERT( playbackTime >= 0 );
1132    mPlaybackSamplesToCopy = playbackTime * mRate;
1133 
1134    // Capacity of the playback buffer.
1135    mPlaybackRingBufferSecs = times.ringBufferDelay;
1136 
1137    mCaptureRingBufferSecs =
1138       4.5 + 0.5 * std::min(size_t(16), mCaptureTracks.size());
1139    mMinCaptureSecsToCopy =
1140       0.2 + 0.2 * std::min(size_t(16), mCaptureTracks.size());
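
   // For example (illustrative figures): with 2 capture tracks the capture ring
   // buffers hold 4.5 + 0.5 * 2 = 5.5 seconds of audio, and at least
   // 0.2 + 0.2 * 2 = 0.6 seconds accumulate before each copy into the tracks;
   // both terms stop growing once there are 16 or more capture tracks.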
1141 
1142    bool bDone;
1143    do
1144    {
1145       bDone = true; // assume success
1146       try
1147       {
1148          if( mNumPlaybackChannels > 0 ) {
1149             // Allocate output buffers.  For every output track we allocate
1150             // a ring buffer of mPlaybackRingBufferSecs seconds
1151             auto playbackBufferSize =
1152                (size_t)lrint(mRate * mPlaybackRingBufferSecs);
1153 
1154             // Always make at least one playback buffer
1155             mPlaybackBuffers.reinit(
1156                std::max<size_t>(1, mPlaybackTracks.size()));
1157             mPlaybackMixers.clear();
1158             mPlaybackMixers.resize(mPlaybackTracks.size());
1159 
1160             const auto &warpOptions =
1161                policy.MixerWarpOptions(mPlaybackSchedule);
1162 
1163             mPlaybackQueueMinimum = lrint( mRate * times.latency );
1164             mPlaybackQueueMinimum =
1165                std::min( mPlaybackQueueMinimum, playbackBufferSize );
1166 
1167             if (mPlaybackTracks.empty())
1168                // Make at least one playback buffer
1169                mPlaybackBuffers[0] =
1170                   std::make_unique<RingBuffer>(floatSample, playbackBufferSize);
1171 
1172             for (unsigned int i = 0; i < mPlaybackTracks.size(); i++)
1173             {
1174                // Bug 1763 - We must fade in from zero to avoid a click on starting.
1175                mPlaybackTracks[i]->SetOldChannelGain(0, 0.0);
1176                mPlaybackTracks[i]->SetOldChannelGain(1, 0.0);
1177 
1178                mPlaybackBuffers[i] =
1179                   std::make_unique<RingBuffer>(floatSample, playbackBufferSize);
1180 
1181                // use track time for the end time, not real time!
1182                WaveTrackConstArray mixTracks;
1183                mixTracks.push_back(mPlaybackTracks[i]);
1184 
1185                double startTime, endTime;
1186                if (make_iterator_range(tracks.prerollTracks)
1187                       .contains(mPlaybackTracks[i])) {
1188                   // Stop playing this track after pre-roll
1189                   startTime = mPlaybackSchedule.mT0;
1190                   endTime = t0;
1191                }
1192                else {
1193                   // Pass t1 -- not mT1 as may have been adjusted for latency
1194                   // -- so that overdub recording stops playing back samples
1195                   // at the right time, though transport may continue to record
1196                   startTime = t0;
1197                   endTime = t1;
1198                }
1199 
1200                mPlaybackMixers[i] = std::make_unique<Mixer>
1201                   (mixTracks,
1202                   // Don't throw for read errors, just play silence:
1203                   false,
1204                   warpOptions,
1205                   startTime,
1206                   endTime,
1207                   1,
1208                   std::max( mPlaybackSamplesToCopy, mPlaybackQueueMinimum ),
1209                   false,
1210                   mRate, floatSample,
1211                   false, // low quality dithering and resampling
1212                   nullptr,
1213                   false // don't apply track gains
1214                );
1215             }
1216 
1217             const auto timeQueueSize = 1 +
1218                (playbackBufferSize + TimeQueueGrainSize - 1)
1219                   / TimeQueueGrainSize;
1220             mPlaybackSchedule.mTimeQueue.Resize( timeQueueSize );
1221          }
1222 
1223          if( mNumCaptureChannels > 0 )
1224          {
1225             // Allocate input buffers.  For every input track we allocate
1226             // a ring buffer of mCaptureRingBufferSecs (about five) seconds
1227             auto captureBufferSize =
1228                (size_t)(mRate * mCaptureRingBufferSecs + 0.5);
1229 
1230             // In the extraordinarily rare case that we can't even afford
1231             // 100 samples, just give up.
1232             if(captureBufferSize < 100)
1233             {
1234                AudacityMessageBox( XO("Out of memory!") );
1235                return false;
1236             }
1237 
1238             mCaptureBuffers.reinit(mCaptureTracks.size());
1239             mResample.reinit(mCaptureTracks.size());
1240             mFactor = sampleRate / mRate;
1241 
1242             for( unsigned int i = 0; i < mCaptureTracks.size(); i++ )
1243             {
1244                mCaptureBuffers[i] = std::make_unique<RingBuffer>(
1245                   mCaptureTracks[i]->GetSampleFormat(), captureBufferSize );
1246                mResample[i] =
1247                   std::make_unique<Resample>(true, mFactor, mFactor);
1248                   // constant rate resampling
1249             }
1250          }
1251       }
1252       catch(std::bad_alloc&)
1253       {
1254          // Oops!  Ran out of memory.  This is pretty rare, so we'll just
1255          // try deleting everything, halving our buffer size, and trying again.
1256          StartStreamCleanup(true);
1257          mPlaybackRingBufferSecs *= 0.5;
1258          mPlaybackSamplesToCopy /= 2;
1259          mCaptureRingBufferSecs *= 0.5;
1260          mMinCaptureSecsToCopy *= 0.5;
1261          bDone = false;
1262 
1263          // In the extraordinarily rare case that we can't even afford 100
1264          // samples, just give up.
1265          auto playbackBufferSize =
1266             (size_t)lrint(mRate * mPlaybackRingBufferSecs);
1267          if(playbackBufferSize < 100 || mPlaybackSamplesToCopy < 100)
1268          {
1269             AudacityMessageBox( XO("Out of memory!") );
1270             return false;
1271          }
1272       }
1273    } while(!bDone);
1274 
1275    success = true;
1276    return true;
1277 }
1278 
1279 void AudioIO::StartStreamCleanup(bool bOnlyBuffers)
1280 {
1281    if (mNumPlaybackChannels > 0)
1282    {
1283       RealtimeEffectManager::Get().RealtimeFinalize();
1284    }
1285 
1286    mPlaybackBuffers.reset();
1287    mPlaybackMixers.clear();
1288    mCaptureBuffers.reset();
1289    mResample.reset();
1290    mPlaybackSchedule.mTimeQueue.Clear();
1291 
1292    if(!bOnlyBuffers)
1293    {
1294       Pa_AbortStream( mPortStreamV19 );
1295       Pa_CloseStream( mPortStreamV19 );
1296       mPortStreamV19 = NULL;
1297       mStreamToken = 0;
1298    }
1299 
1300    mPlaybackSchedule.GetPolicy().Finalize( mPlaybackSchedule );
1301 }
1302 
1303 bool AudioIO::IsAvailable(AudacityProject &project) const
1304 {
1305    auto pOwningProject = mOwningProject.lock();
1306    return !pOwningProject || pOwningProject.get() == &project;
1307 }
1308 
1309 void AudioIO::SetMeters()
1310 {
1311    if (auto pInputMeter = mInputMeter.lock())
1312       pInputMeter->Reset(mRate, true);
1313    if (auto pOutputMeter = mOutputMeter.lock())
1314       pOutputMeter->Reset(mRate, true);
1315 
1316    mUpdateMeters = true;
1317 }
1318 
1319 void AudioIO::StopStream()
1320 {
1321    auto cleanup = finally ( [this] {
1322       ClearRecordingException();
1323       mRecordingSchedule.mCrossfadeData.clear(); // free arrays
1324    } );
1325 
1326    if( mPortStreamV19 == NULL )
1327       return;
1328 
1329    // DV: This code seems to be unnecessary.
1330    // We do not leave mPortStreamV19 open in stopped
1331    // state. (Do we?)
1332    // This breaks WASAPI backend, as it sets the `running`
1333    // flag to `false` asynchronously.
1334    // Previously we have patched PortAudio and the patch
1335    // was breaking IsStreamStopped() == !IsStreamActive()
1336    // invariant.
1337    /*
1338    if ( Pa_IsStreamStopped(mPortStreamV19) )
1339       return;
1340    */
1341 
1342 #if (defined(__WXMAC__) || defined(__WXMSW__)) && wxCHECK_VERSION(3,1,0)
1343    // Re-enable system sleep
1344    wxPowerResource::Release(wxPOWER_RESOURCE_SCREEN);
1345 #endif
1346 
1347    if( mAudioThreadTrackBufferExchangeLoopRunning )
1348    {
1349       // PortAudio callback can use the information that we are stopping to fade
1350       // out the audio.  Give PortAudio callback a chance to do so.
1351       mForceFadeOut.store(true, std::memory_order_relaxed);
1352       auto latency = static_cast<long>(AudioIOLatencyDuration.Read());
1353       // If we can gracefully fade out in 200ms, with the faded-out play buffers making it through
1354       // the sound card, then do so.  If we can't, don't wait around.  Just stop quickly and accept
1355       // there will be a click.
1356       if( mbMicroFades  && (latency < 150 ))
1357          wxMilliSleep( latency + 50);
1358    }
1359 
1360    wxMutexLocker locker(mSuspendAudioThread);
1361 
1362    // No longer need effects processing
1363    if (mNumPlaybackChannels > 0)
1364    {
1365       RealtimeEffectManager::Get().RealtimeFinalize();
1366    }
1367 
1368    //
1369    // We got here in one of two ways:
1370    //
1371    // 1. The user clicked the stop button and we therefore want to stop
1372    //    as quickly as possible.  So we use AbortStream().  If this is
1373    //    the case the portaudio stream is still in the Running state
1374    //    (see PortAudio state machine docs).
1375    //
1376    // 2. The callback told PortAudio to stop the stream since it had
1377    //    reached the end of the selection.  The UI thread discovered
1378    //    this by noticing that AudioIO::IsActive() returned false.
1379    //    IsActive() (which calls Pa_GetStreamActive()) will not return
1380    //    false until all buffers have finished playing, so we can call
1381    //    AbortStream without losing any samples.  If this is the case
1382    //    we are in the "callback finished state" (see PortAudio state
1383    //    machine docs).
1384    //
1385    // The moral of the story: We can call AbortStream safely, without
1386    // losing samples.
1387    //
1388    // DMM: This doesn't seem to be true; it seems to be necessary to
1389    // call StopStream if the callback brought us here, and AbortStream
1390    // if the user brought us here.
1391    //
1392    // DV: Seems that Pa_CloseStream calls Pa_AbortStream internally,
1393    // at least for PortAudio 19.7.0+
1394 
1395    mAudioThreadTrackBufferExchangeLoopRunning = false;
1396 
1397    // Audacity can deadlock if it tries to update meters while
1398    // we're stopping PortAudio (because the meter updating code
1399    // tries to grab a UI mutex while PortAudio tries to join a
1400    // pthread).  So we tell the callback to stop updating meters,
1401    // and wait until the callback has left this part of the code
1402    // if it was already there.
1403    mUpdateMeters = false;
1404    while(mUpdatingMeters) {
1405       ::wxSafeYield();
1406       wxMilliSleep( 50 );
1407    }
1408 
1409    // Turn off HW playthrough if PortMixer is being used
1410 
1411   #if defined(USE_PORTMIXER)
1412    if( mPortMixer ) {
1413       #if __WXMAC__
1414       if (Px_SupportsPlaythrough(mPortMixer) && mPreviousHWPlaythrough >= 0.0)
1415          Px_SetPlaythrough(mPortMixer, mPreviousHWPlaythrough);
1416       mPreviousHWPlaythrough = -1.0;
1417       #endif
1418    }
1419   #endif
1420 
1421    if (mPortStreamV19) {
1422       // DV: Pa_CloseStream will call Pa_AbortStream internally,
1423       // but it doesn't hurt to do it ourselves.
1424       // Pa_AbortStream will silently fail if the stream is stopped.
1425       if (!Pa_IsStreamStopped( mPortStreamV19 ))
1426         Pa_AbortStream( mPortStreamV19 );
1427 
1428       Pa_CloseStream( mPortStreamV19 );
1429 
1430       mPortStreamV19 = NULL;
1431    }
1432 
1433    for( auto &ext : Extensions() )
1434       ext.StopOtherStream();
1435 
1436    auto pListener = GetListener();
1437 
1438    // If there's no token, we were just monitoring, so we can
1439    // skip this next part...
1440    if (mStreamToken > 0) {
1441       // In either of the above cases, we want to make sure that any
1442       // capture data that made it into the PortAudio callback makes it
1443       // to the target WaveTrack.  To do this, we ask the audio thread to
1444       // call TrackBufferExchange one last time (it normally would not do so since
1445       // Pa_GetStreamActive() would now return false).
1446       mAudioThreadShouldCallTrackBufferExchangeOnce = true;
1447 
1448       while( mAudioThreadShouldCallTrackBufferExchangeOnce )
1449       {
1450          //FIXME: Seems like this block of the UI thread isn't bounded,
1451          //but we cannot allow event handlers to see incompletely terminated
1452          //AudioIO state with wxYield (or similar functions)
1453          wxMilliSleep( 50 );
1454       }
1455 
1456       //
1457       // Everything is taken care of.  Now, just free all the resources
1458       // we allocated in StartStream()
1459       //
1460 
1461       if (mPlaybackTracks.size() > 0)
1462       {
1463          mPlaybackBuffers.reset();
1464          mPlaybackMixers.clear();
1465          mPlaybackSchedule.mTimeQueue.Clear();
1466       }
1467 
1468       //
1469       // Offset all recorded tracks to account for latency
1470       //
1471       if (mCaptureTracks.size() > 0)
1472       {
1473          mCaptureBuffers.reset();
1474          mResample.reset();
1475 
1476          //
1477          // We only apply latency correction when we actually played back
1478          // tracks during the recording. If we did not play back tracks,
1479          // there's nothing we could be out of sync with. This also covers the
1480          // case that we do not apply latency correction when recording the
1481          // first track in a project.
1482          //
1483 
1484          for (unsigned int i = 0; i < mCaptureTracks.size(); i++) {
1485             // The calls to Flush
1486             // may cause exceptions because of exhaustion of disk space.
1487             // Stop those exceptions here, or else they propagate through too
1488             // many parts of Audacity that are not effects or editing
1489             // operations.  GuardedCall ensures that the user sees a warning.
1490 
1491             // Also be sure to Flush each track, at the top of the guarded call,
1492             // relying on the guarantee that the track will be left in a flushed
1493             // state, though the append buffer may be lost.
1494 
1495             GuardedCall( [&] {
1496                WaveTrack* track = mCaptureTracks[i].get();
1497 
1498                // use No-fail-guarantee that track is flushed,
1499                // Partial-guarantee that some initial length of the recording
1500                // is saved.
1501                // See comments in TrackBufferExchange().
1502                track->Flush();
1503             } );
1504          }
1505 
1506 
1507          if (!mLostCaptureIntervals.empty())
1508          {
1509             // This scope may combine many splittings of wave tracks
1510             // into one transaction, lessening the number of checkpoints
1511             Optional<TransactionScope> pScope;
1512             auto pOwningProject = mOwningProject.lock();
1513             if (pOwningProject) {
1514                auto &pIO = ProjectFileIO::Get(*pOwningProject);
1515                pScope.emplace(pIO.GetConnection(), "Dropouts");
1516             }
1517             for (auto &interval : mLostCaptureIntervals) {
1518                auto &start = interval.first;
1519                auto duration = interval.second;
1520                for (auto &track : mCaptureTracks) {
1521                   GuardedCall([&] {
1522                      track->SyncLockAdjust(start, start + duration);
1523                   });
1524                }
1525             }
1526             if (pScope)
1527                pScope->Commit();
1528          }
1529 
1530          if (pListener)
1531             pListener->OnCommitRecording();
1532       }
1533    }
1534 
1535    if (auto pInputMeter = mInputMeter.lock())
1536       pInputMeter->Reset(mRate, false);
1537 
1538    if (auto pOutputMeter = mOutputMeter.lock())
1539       pOutputMeter->Reset(mRate, false);
1540 
1541    mInputMeter.reset();
1542    mOutputMeter.reset();
1543    ResetOwningProject();
1544 
1545    if (pListener && mNumCaptureChannels > 0)
1546       pListener->OnAudioIOStopRecording();
1547 
1548    wxTheApp->CallAfter([this]{
1549       if (mPortStreamV19 && mNumCaptureChannels > 0)
1550          // Recording was restarted between StopStream and idle time
1551          // So the actions can keep waiting
1552          return;
1553       // In case some other thread was waiting on the mutex too:
1554       std::this_thread::yield();
1555       std::lock_guard<std::mutex> guard{ mPostRecordingActionMutex };
1556       if (mPostRecordingAction) {
1557          mPostRecordingAction();
1558          mPostRecordingAction = {};
1559       }
1560       DelayActions(false);
1561    });
1562 
1563    //
1564    // Only set token to 0 after we're totally finished with everything
1565    //
1566    bool wasMonitoring = mStreamToken == 0;
1567    mStreamToken = 0;
1568 
1569    if (mNumPlaybackChannels > 0)
1570    {
1571       wxCommandEvent e(EVT_AUDIOIO_PLAYBACK);
1572       auto pOwningProject = mOwningProject.lock();
1573       e.SetEventObject(pOwningProject.get());
1574       e.SetInt(false);
1575       wxTheApp->ProcessEvent(e);
1576    }
1577 
1578    if (mNumCaptureChannels > 0)
1579    {
1580       wxCommandEvent e(wasMonitoring ? EVT_AUDIOIO_MONITOR : EVT_AUDIOIO_CAPTURE);
1581       auto pOwningProject = mOwningProject.lock();
1582       e.SetEventObject(pOwningProject.get());
1583       e.SetInt(false);
1584       wxTheApp->ProcessEvent(e);
1585    }
1586 
1587    mNumCaptureChannels = 0;
1588    mNumPlaybackChannels = 0;
1589 
1590    mPlaybackTracks.clear();
1591    mCaptureTracks.clear();
1592 
1593    mPlaybackSchedule.GetPolicy().Finalize( mPlaybackSchedule );
1594 
1595    if (pListener) {
1596       // Tell UI to hide sample rate
1597       pListener->OnAudioIORate(0);
1598    }
1599 
1600    // Don't cause a busy wait in the audio thread after stopping scrubbing
1601    mPlaybackSchedule.ResetMode();
1602 }
1603 
1604 void AudioIO::SetPaused(bool state)
1605 {
1606    if (state != mPaused)
1607    {
1608       if (state)
1609       {
1610          RealtimeEffectManager::Get().RealtimeSuspend();
1611       }
1612       else
1613       {
1614          RealtimeEffectManager::Get().RealtimeResume();
1615       }
1616    }
1617 
1618    mPaused = state;
1619 }
1620 
1621 double AudioIO::GetBestRate(bool capturing, bool playing, double sampleRate)
1622 {
1623    // Check if we can use the cached value
1624    if (mCachedBestRateIn != 0.0 && mCachedBestRateIn == sampleRate
1625       && mCachedBestRatePlaying == playing && mCachedBestRateCapturing == capturing) {
1626       return mCachedBestRateOut;
1627    }
1628 
1629    // In order to cache the value, all early returns should instead set retval
1630    // and jump to finished
1631    double retval;
1632 
1633    std::vector<long> rates;
1634    if (capturing) wxLogDebug(wxT("AudioIO::GetBestRate() for capture"));
1635    if (playing) wxLogDebug(wxT("AudioIO::GetBestRate() for playback"));
1636    wxLogDebug(wxT("GetBestRate() suggested rate %.0lf Hz"), sampleRate);
1637 
1638    if (capturing && !playing) {
1639       rates = GetSupportedCaptureRates(-1, sampleRate);
1640    }
1641    else if (playing && !capturing) {
1642       rates = GetSupportedPlaybackRates(-1, sampleRate);
1643    }
1644    else {   // we assume capturing and playing - the alternative would be a
1645             // bit odd
1646       rates = GetSupportedSampleRates(-1, -1, sampleRate);
1647    }
1648    /* Note: rates is the array of hardware-supported sample rates (in the
1649     * current configuration); sampleRate is the Project Rate (desired rate) */
1650    long rate = (long)sampleRate;
1651 
1652    if (make_iterator_range(rates).contains(rate)) {
1653       wxLogDebug(wxT("GetBestRate() Returning %.0ld Hz"), rate);
1654       retval = rate;
1655       goto finished;
1656       /* the easy case - the suggested rate (project rate) is in the list, and
1657        * we can just accept that and send back to the caller. This should be
1658        * the case for most users most of the time (all of the time on
1659        * Win MME as the OS does resampling) */
1660    }
1661 
1662    /* if we get here, there is a problem - the project rate isn't supported
1663     * on our hardware, so we can't use it. We need to come up with an alternative
1664     * rate to use. The process goes like this:
1665     * * If there are no rates to pick from, we're stuck and return 0 (error)
1666     * * If there are some rates, we pick the next one higher than the requested
1667     *   rate to use.
1668     * * If there aren't any higher, we use the highest available rate */
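   /* Illustrative example (hypothetical device rates): with a project rate of
    * 44100 Hz and supported rates { 32000, 48000, 96000 }, the loop below
    * returns 48000, the next rate above the request; if only
    * { 8000, 16000, 22050 } were supported, we would fall back to 22050,
    * the highest available. */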
1669 
1670    if (rates.empty()) {
1671       /* we're stuck - there are no supported rates with this hardware. Error */
1672       wxLogDebug(wxT("GetBestRate() Error - no supported sample rates"));
1673       retval = 0.0;
1674       goto finished;
1675    }
1676    int i;
1677    for (i = 0; i < (int)rates.size(); i++)  // for each supported rate
1678    {
1679       if (rates[i] > rate) {
1680          // supported rate is greater than requested rate
1681          wxLogDebug(wxT("GetBestRate() Returning next higher rate - %.0ld Hz"), rates[i]);
1682          retval = rates[i];
1683          goto finished;
1684       }
1685    }
1686 
1687    wxLogDebug(wxT("GetBestRate() Returning highest rate - %.0ld Hz"), rates.back());
1688    retval = rates.back(); // the highest available rate
1689    goto finished;
1690 
1691 finished:
1692    mCachedBestRateIn = sampleRate;
1693    mCachedBestRateOut = retval;
1694    mCachedBestRatePlaying = playing;
1695    mCachedBestRateCapturing = capturing;
1696    return retval;
1697 }
1698 
1699 double AudioIO::GetStreamTime()
1700 {
1701    // Track time readout for the main thread
1702 
1703    if( !IsStreamActive() )
1704       return BAD_STREAM_TIME;
1705 
1706    return mPlaybackSchedule.GetTrackTime();
1707 }
1708 
1709 
1710 //////////////////////////////////////////////////////////////////////
1711 //
1712 //     Audio Thread Context
1713 //
1714 //////////////////////////////////////////////////////////////////////
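// The functions below run in the Audio Thread created by AudioIO.  Its loop
// (AudioThread::Entry) repeatedly calls TrackBufferExchange() so that the
// playback ring buffers stay filled from disk and captured samples are
// drained into the WaveTracks, while the PortAudio callback thread performs
// the real-time device I/O.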
1715 
1716 AudioThread::ExitCode AudioThread::Entry()
1717 {
1718    AudioIO *gAudioIO;
1719    while( !TestDestroy() &&
1720       nullptr != ( gAudioIO = AudioIO::Get() ) )
1721    {
1722       using Clock = std::chrono::steady_clock;
1723       auto loopPassStart = Clock::now();
1724       auto &schedule = gAudioIO->mPlaybackSchedule;
1725       const auto interval = schedule.GetPolicy().SleepInterval(schedule);
1726 
1727       // Set LoopActive outside the tests to avoid race condition
1728       gAudioIO->mAudioThreadTrackBufferExchangeLoopActive = true;
1729       if( gAudioIO->mAudioThreadShouldCallTrackBufferExchangeOnce )
1730       {
1731          gAudioIO->TrackBufferExchange();
1732          gAudioIO->mAudioThreadShouldCallTrackBufferExchangeOnce = false;
1733       }
1734       else if( gAudioIO->mAudioThreadTrackBufferExchangeLoopRunning )
1735       {
1736          gAudioIO->TrackBufferExchange();
1737       }
1738       gAudioIO->mAudioThreadTrackBufferExchangeLoopActive = false;
1739 
1740       std::this_thread::sleep_until( loopPassStart + interval );
1741    }
1742 
1743    return 0;
1744 }
1745 
1746 
1747 size_t AudioIO::GetCommonlyFreePlayback()
1748 {
1749    auto commonlyAvail = mPlaybackBuffers[0]->AvailForPut();
1750    for (unsigned i = 1; i < mPlaybackTracks.size(); ++i)
1751       commonlyAvail = std::min(commonlyAvail,
1752          mPlaybackBuffers[i]->AvailForPut());
1753    // MB: subtract a few samples because the code in TrackBufferExchange has rounding
1754    // errors
1755    return commonlyAvail - std::min(size_t(10), commonlyAvail);
1756 }
1757 
1758 size_t AudioIoCallback::GetCommonlyReadyPlayback()
1759 {
1760    auto commonlyAvail = mPlaybackBuffers[0]->AvailForGet();
1761    for (unsigned i = 1; i < mPlaybackTracks.size(); ++i)
1762       commonlyAvail = std::min(commonlyAvail,
1763          mPlaybackBuffers[i]->AvailForGet());
1764    return commonlyAvail;
1765 }
1766 
1767 size_t AudioIO::GetCommonlyAvailCapture()
1768 {
1769    auto commonlyAvail = mCaptureBuffers[0]->AvailForGet();
1770    for (unsigned i = 1; i < mCaptureTracks.size(); ++i)
1771       commonlyAvail = std::min(commonlyAvail,
1772          mCaptureBuffers[i]->AvailForGet());
1773    return commonlyAvail;
1774 }
1775 
1776 // This method is the data gateway between the audio thread (which
1777 // communicates with the disk) and the PortAudio callback thread
1778 // (which communicates with the audio device).
1779 void AudioIO::TrackBufferExchange()
1780 {
1781    FillPlayBuffers();
1782    DrainRecordBuffers();
1783 }
1784 
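// Producer side of the playback ring buffers: decide how many samples to
// request from the mixers on this pass, write them into every track's ring
// buffer, and advance the time queue by the same amount.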
1785 void AudioIO::FillPlayBuffers()
1786 {
1787    if (mNumPlaybackChannels == 0)
1788       return;
1789 
1790    // Though extremely unlikely, it is possible that some buffers
1791    // will have more samples available than others.  This could happen
1792    // if we hit this code during the PortAudio callback.  To keep
1793    // things simple, we only write as much data as is vacant in
1794    // ALL buffers, and advance the global time by that much.
1795    auto nAvailable = GetCommonlyFreePlayback();
1796 
1797    // Don't fill the buffers at all unless we can do the
1798    // full mMaxPlaybackSecsToCopy.  This improves performance
1799    // by not always trying to process tiny chunks, eating the
1800    // CPU unnecessarily.
1801    if (nAvailable < mPlaybackSamplesToCopy)
1802       return;
1803 
1804    auto &policy = mPlaybackSchedule.GetPolicy();
1805 
1806    // More than mPlaybackSamplesToCopy might be copied:
1807    // May produce a larger amount when initially priming the buffer, or
1808    // perhaps again later in play to avoid underfilling the queue and falling
1809    // behind the real-time demand on the consumer side in the callback.
1810    auto nReady = GetCommonlyReadyPlayback();
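   // nNeeded is the shortfall below the queue minimum, written as a
   // saturating subtraction so it cannot go negative when the queue is
   // already full enough.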
1811    auto nNeeded =
1812       mPlaybackQueueMinimum - std::min(mPlaybackQueueMinimum, nReady);
1813 
1814    // wxASSERT( nNeeded <= nAvailable );
1815 
1816    // Limit maximum buffer size (increases performance)
1817    auto available = std::min( nAvailable,
1818       std::max( nNeeded, mPlaybackSamplesToCopy ) );
1819 
1820    // msmeyer: When playing a very short selection in looped
1821    // mode, the selection must be copied to the buffer multiple
1822    // times to ensure that the buffer has a reasonable size.
1823    // This is the purpose of this loop.
1824    // PRL: or, when scrubbing, we may get work repeatedly from the
1825    // user interface.
1826    bool done = false;
1827    do {
1828       const auto [frames, toProduce] =
1829          policy.GetPlaybackSlice(mPlaybackSchedule, available);
1830 
1831       // Update the time queue.  This must be done before writing to the
1832       // ring buffers of samples, for proper synchronization with the
1833       // consumer side in the PortAudio thread, which reads the time
1834       // queue after reading the sample queues.  The sample queues use
1835       // atomic variables, the time queue doesn't.
1836       mPlaybackSchedule.mTimeQueue.Producer(mPlaybackSchedule, frames);
1837 
1838       for (size_t i = 0; i < mPlaybackTracks.size(); i++)
1839       {
1840          // The mixer here isn't actually mixing: it's just doing
1841          // resampling, format conversion, and possibly time track
1842          // warping
1843          if (frames > 0)
1844          {
1845             size_t produced = 0;
1846             if ( toProduce )
1847                produced = mPlaybackMixers[i]->Process( toProduce );
1848             //wxASSERT(produced <= toProduce);
1849             auto warpedSamples = mPlaybackMixers[i]->GetBuffer();
1850             const auto put = mPlaybackBuffers[i]->Put(
1851                warpedSamples, floatSample, produced, frames - produced);
1852             // wxASSERT(put == frames);
1853             // but we can't assert in this thread
1854             wxUnusedVar(put);
1855          }
1856       }
1857 
1858       if (mPlaybackTracks.empty())
1859          // Produce silence in the single ring buffer
1860          mPlaybackBuffers[0]->Put(nullptr, floatSample, 0, frames);
1861 
1862       available -= frames;
1863       // wxASSERT(available >= 0); // don't assert on this thread
1864 
1865       done = policy.RepositionPlayback( mPlaybackSchedule, mPlaybackMixers,
1866          frames, available );
1867    } while (available && !done);
1868 }
1869 
1870 void AudioIO::DrainRecordBuffers()
1871 {
1872    if (mRecordingException || mCaptureTracks.empty())
1873       return;
1874 
1875    auto delayedHandler = [this] ( AudacityException * pException ) {
1876       // In the main thread, stop recording
1877       // This is one place where the application handles disk
1878       // exhaustion exceptions from wave track operations, without rolling
1879       // back to the last pushed undo state.  Instead, partial recording
1880       // results are pushed as a NEW undo state.  For this reason, as
1881       // commented elsewhere, we want an exception safety guarantee for
1882       // the output wave tracks, after the failed append operation, that
1883       // the tracks remain as they were after the previous successful
1884       // (block-level) appends.
1885 
1886       // Note that the Flush in StopStream() may throw another exception,
1887       // but StopStream() contains that exception, and the logic in
1888       // AudacityException::DelayedHandlerAction prevents redundant message
1889       // boxes.
1890       StopStream();
1891       DefaultDelayedHandlerAction{}( pException );
1892    };
1893 
1894    GuardedCall( [&] {
1895       // start record buffering
1896       const auto avail = GetCommonlyAvailCapture(); // samples
1897       const auto remainingTime =
1898          std::max(0.0, mRecordingSchedule.ToConsume());
1899       // This may be a very big double number:
1900       const auto remainingSamples = remainingTime * mRate;
1901       bool latencyCorrected = true;
1902 
1903       double deltat = avail / mRate;
1904 
1905       if (mAudioThreadShouldCallTrackBufferExchangeOnce ||
1906           deltat >= mMinCaptureSecsToCopy)
1907       {
1908          // This scope may combine many appendings of wave tracks,
1909          // and also an autosave, into one transaction,
1910          // lessening the number of checkpoints
1911          Optional<TransactionScope> pScope;
1912          auto pOwningProject = mOwningProject.lock();
1913          if (pOwningProject) {
1914             auto &pIO = ProjectFileIO::Get( *pOwningProject );
1915             pScope.emplace(pIO.GetConnection(), "Recording");
1916          }
1917 
1918          bool newBlocks = false;
1919 
1920          // Append captured samples to the end of the WaveTracks.
1921          // The WaveTracks have their own buffering for efficiency.
1922          auto numChannels = mCaptureTracks.size();
1923 
1924          for( size_t i = 0; i < numChannels; i++ )
1925          {
1926             sampleFormat trackFormat = mCaptureTracks[i]->GetSampleFormat();
1927 
1928             size_t discarded = 0;
1929 
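            // Latency correction is applied once per track per recording: a
            // positive total correction prepends that much silence to the
            // track, while a negative one discards captured samples from the
            // ring buffer.  For example (illustrative numbers only), a
            // correction of -0.13 s at 44100 Hz discards roughly 5733
            // samples, possibly spread over several passes if the ring
            // buffer does not yet hold that many.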
1930             if (!mRecordingSchedule.mLatencyCorrected) {
1931                const auto correction = mRecordingSchedule.TotalCorrection();
1932                if (correction >= 0) {
1933                   // Rightward shift
1934                   // Once only (per track per recording), insert some initial
1935                   // silence.
1936                   size_t size = floor( correction * mRate * mFactor);
1937                   SampleBuffer temp(size, trackFormat);
1938                   ClearSamples(temp.ptr(), trackFormat, 0, size);
1939                   mCaptureTracks[i]->Append(temp.ptr(), trackFormat, size, 1);
1940                }
1941                else {
1942                   // Leftward shift
1943                   // discard some samples from the ring buffers.
1944                   size_t size = floor(
1945                      mRecordingSchedule.ToDiscard() * mRate );
1946 
1947                   // The ring buffer might have grown concurrently -- don't discard more
1948                   // than the "avail" value noted above.
1949                   discarded = mCaptureBuffers[i]->Discard(std::min(avail, size));
1950 
1951                   if (discarded < size)
1952                      // We need to visit this again to complete the
1953                      // discarding.
1954                      latencyCorrected = false;
1955                }
1956             }
1957 
1958             const float *pCrossfadeSrc = nullptr;
1959             size_t crossfadeStart = 0, totalCrossfadeLength = 0;
1960             if (i < mRecordingSchedule.mCrossfadeData.size())
1961             {
1962                // Do crossfading
1963                // The supplied crossfade samples are at the same rate as the track
1964                const auto &data = mRecordingSchedule.mCrossfadeData[i];
1965                totalCrossfadeLength = data.size();
1966                if (totalCrossfadeLength) {
1967                   crossfadeStart =
1968                      floor(mRecordingSchedule.Consumed() * mCaptureTracks[i]->GetRate());
1969                   if (crossfadeStart < totalCrossfadeLength)
1970                      pCrossfadeSrc = data.data() + crossfadeStart;
1971                }
1972             }
1973 
1974             wxASSERT(discarded <= avail);
1975             size_t toGet = avail - discarded;
1976             SampleBuffer temp;
1977             size_t size;
1978             sampleFormat format;
1979             if( mFactor == 1.0 )
1980             {
1981                // Take captured samples directly
1982                size = toGet;
1983                if (pCrossfadeSrc)
1984                   // Change to float for crossfade calculation
1985                   format = floatSample;
1986                else
1987                   format = trackFormat;
1988                temp.Allocate(size, format);
1989                const auto got =
1990                   mCaptureBuffers[i]->Get(temp.ptr(), format, toGet);
1991                // wxASSERT(got == toGet);
1992                // but we can't assert in this thread
1993                wxUnusedVar(got);
1994                if (double(size) > remainingSamples)
1995                   size = floor(remainingSamples);
1996             }
1997             else
1998             {
1999                size = lrint(toGet * mFactor);
2000                format = floatSample;
2001                SampleBuffer temp1(toGet, floatSample);
2002                temp.Allocate(size, format);
2003                const auto got =
2004                   mCaptureBuffers[i]->Get(temp1.ptr(), floatSample, toGet);
2005                // wxASSERT(got == toGet);
2006                // but we can't assert in this thread
2007                wxUnusedVar(got);
2008                /* we are re-sampling on the fly. The last resampling call
2009                 * must flush any samples left in the rate conversion buffer
2010                 * so that they get recorded
2011                 */
2012                if (toGet > 0 ) {
2013                   if (double(toGet) > remainingSamples)
2014                      toGet = floor(remainingSamples);
2015                   const auto results =
2016                   mResample[i]->Process(mFactor, (float *)temp1.ptr(), toGet,
2017                                         !IsStreamActive(), (float *)temp.ptr(), size);
2018                   size = results.second;
2019                }
2020             }
2021 
2022             if (pCrossfadeSrc) {
2023                wxASSERT(format == floatSample);
2024                size_t crossfadeLength = std::min(size, totalCrossfadeLength - crossfadeStart);
2025                if (crossfadeLength) {
2026                   auto ratio = double(crossfadeStart) / totalCrossfadeLength;
2027                   auto ratioStep = 1.0 / totalCrossfadeLength;
2028                   auto pCrossfadeDst = (float*)temp.ptr();
2029 
2030                   // Crossfade loop here
2031                   for (size_t ii = 0; ii < crossfadeLength; ++ii) {
2032                      *pCrossfadeDst = ratio * *pCrossfadeDst + (1.0 - ratio) * *pCrossfadeSrc;
2033                      ++pCrossfadeSrc, ++pCrossfadeDst;
2034                      ratio += ratioStep;
2035                   }
2036                }
2037             }
2038 
2039             // Now append
2040             // see comment in second handler about guarantee
2041             newBlocks = mCaptureTracks[i]->Append(temp.ptr(), format, size, 1)
2042                || newBlocks;
2043          } // end loop over capture channels
2044 
2045          // Now update the recording schedule position
2046          mRecordingSchedule.mPosition += avail / mRate;
2047          mRecordingSchedule.mLatencyCorrected = latencyCorrected;
2048 
2049          auto pListener = GetListener();
2050          if (pListener && newBlocks)
2051             pListener->OnAudioIONewBlocks(&mCaptureTracks);
2052 
2053          if (pScope)
2054             pScope->Commit();
2055       }
2056       // end of record buffering
2057    },
2058    // handler
2059    [this] ( AudacityException *pException ) {
2060       if ( pException ) {
2061          // So that we don't attempt to fill the recording buffer again
2062          // before the main thread stops recording
2063          SetRecordingException();
2064          return ;
2065       }
2066       else
2067          // Don't want to intercept other exceptions (?)
2068          throw;
2069    },
2070    delayedHandler );
2071 }
2072 
2073 void AudioIoCallback::SetListener(
2074    const std::shared_ptr< AudioIOListener > &listener)
2075 {
2076    if (IsBusy())
2077       return;
2078 
2079    mListener = listener;
2080 }
2081 
2082 // Automated Input Level Adjustment - Automatically tries to find an acceptable input volume
2083 #ifdef EXPERIMENTAL_AUTOMATED_INPUT_LEVEL_ADJUSTMENT
2084 
2085 #include "ProjectStatus.h"
2086 
2087 void AudioIO::AILAInitialize() {
2088    gPrefs->Read(wxT("/AudioIO/AutomatedInputLevelAdjustment"), &mAILAActive,         false);
2089    gPrefs->Read(wxT("/AudioIO/TargetPeak"),            &mAILAGoalPoint,      AILA_DEF_TARGET_PEAK);
2090    gPrefs->Read(wxT("/AudioIO/DeltaPeakVolume"),       &mAILAGoalDelta,      AILA_DEF_DELTA_PEAK);
2091    gPrefs->Read(wxT("/AudioIO/AnalysisTime"),          &mAILAAnalysisTime,   AILA_DEF_ANALYSIS_TIME);
2092    gPrefs->Read(wxT("/AudioIO/NumberAnalysis"),        &mAILATotalAnalysis,  AILA_DEF_NUMBER_ANALYSIS);
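   // The preference values arrive in UI units: target peak and delta as
   // percentages, analysis time in milliseconds.  Convert them to fractions
   // and seconds below.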
2093    mAILAGoalDelta         /= 100.0;
2094    mAILAGoalPoint         /= 100.0;
2095    mAILAAnalysisTime      /= 1000.0;
2096    mAILAMax                = 0.0;
2097    mAILALastStartTime      = max(0.0, mPlaybackSchedule.mT0);
2098    mAILAClipped            = false;
2099    mAILAAnalysisCounter    = 0;
2100    mAILAChangeFactor       = 1.0;
2101    mAILALastChangeType     = 0;
2102    mAILATopLevel           = 1.0;
2103    mAILAAnalysisEndTime    = -1.0;
2104 }
2105 
2106 void AudioIO::AILADisable() {
2107    mAILAActive = false;
2108 }
2109 
2110 bool AudioIO::AILAIsActive() {
2111    return mAILAActive;
2112 }
2113 
2114 void AudioIO::AILASetStartTime() {
2115    mAILAAbsolutStartTime = Pa_GetStreamTime(mPortStreamV19);
2116    wxPrintf("START TIME %f\n\n", mAILAAbsolutStartTime);
2117 }
2118 
2119 double AudioIO::AILAGetLastDecisionTime() {
2120    return mAILAAnalysisEndTime;
2121 }
2122 
2123 void AudioIO::AILAProcess(double maxPeak) {
2124    const auto proj = mOwningProject.lock();
2125    if (proj && mAILAActive) {
2126       if (mInputMeter && mInputMeter->IsClipping()) {
2127          mAILAClipped = true;
2128          wxPrintf("clipped");
2129       }
2130 
2131       mAILAMax = max(mAILAMax, maxPeak);
2132 
2133       if ((mAILATotalAnalysis == 0 || mAILAAnalysisCounter < mAILATotalAnalysis) && mPlaybackSchedule.GetTrackTime() - mAILALastStartTime >= mAILAAnalysisTime) {
2134          auto ToLinearIfDB = [](double value, int dbRange) {
2135             if (dbRange >= 0)
2136                value = pow(10.0, (-(1.0-value) * dbRange)/20.0);
2137             return value;
2138          };
2139 
2140          putchar('\n');
2141          mAILAMax = mInputMeter ? ToLinearIfDB(mAILAMax, mInputMeter->GetDBRange()) : 0.0;
2142          double iv = (double) Px_GetInputVolume(mPortMixer);
2143          unsigned short changetype = 0; //0 - no change, 1 - volume decreased (level too high), 2 - volume increased (level too low)
2144          wxPrintf("mAILAAnalysisCounter:%d\n", mAILAAnalysisCounter);
2145          wxPrintf("\tmAILAClipped:%d\n", mAILAClipped);
2146          wxPrintf("\tmAILAMax (linear):%f\n", mAILAMax);
2147          wxPrintf("\tmAILAGoalPoint:%f\n", mAILAGoalPoint);
2148          wxPrintf("\tmAILAGoalDelta:%f\n", mAILAGoalDelta);
2149          wxPrintf("\tiv:%f\n", iv);
2150          wxPrintf("\tmAILAChangeFactor:%f\n", mAILAChangeFactor);
2151          if (mAILAClipped || mAILAMax > mAILAGoalPoint + mAILAGoalDelta) {
2152             wxPrintf("too high:\n");
2153             mAILATopLevel = min(mAILATopLevel, iv);
2154             wxPrintf("\tmAILATopLevel:%f\n", mAILATopLevel);
2155             //if clipped or too high
2156             if (iv <= LOWER_BOUND) {
2157                //we can't improve it more now
2158                if (mAILATotalAnalysis != 0) {
2159                   mAILAActive = false;
2160                   ProjectStatus::Get( *proj ).Set(
2161                      XO(
2162 "Automated Recording Level Adjustment stopped. It was not possible to optimize it more. Still too high.") );
2163                }
2164                wxPrintf("\talready min vol:%f\n", iv);
2165             }
2166             else {
2167                float vol = (float) max(LOWER_BOUND, iv+(mAILAGoalPoint-mAILAMax)*mAILAChangeFactor);
2168                Px_SetInputVolume(mPortMixer, vol);
2169                auto msg = XO(
2170 "Automated Recording Level Adjustment decreased the volume to %f.").Format( vol );
2171                ProjectStatus::Get( *proj ).Set(msg);
2172                changetype = 1;
2173                wxPrintf("\tnew vol:%f\n", vol);
2174                float check = Px_GetInputVolume(mPortMixer);
2175                wxPrintf("\tverified %f\n", check);
2176             }
2177          }
2178          else if ( mAILAMax < mAILAGoalPoint - mAILAGoalDelta ) {
2179             //if too low
2180             wxPrintf("too low:\n");
2181             if (iv >= UPPER_BOUND || iv + 0.005 > mAILATopLevel) { //condition for too low volumes and/or variable volumes that cause mAILATopLevel to decrease too much
2182                //we can't improve it more
2183                if (mAILATotalAnalysis != 0) {
2184                   mAILAActive = false;
2185                   ProjectStatus::Get( *proj ).Set(
2186                      XO(
2187 "Automated Recording Level Adjustment stopped. It was not possible to optimize it more. Still too low.") );
2188                }
2189                wxPrintf("\talready max vol:%f\n", iv);
2190             }
2191             else {
2192                float vol = (float) min(UPPER_BOUND, iv+(mAILAGoalPoint-mAILAMax)*mAILAChangeFactor);
2193                if (vol > mAILATopLevel) {
2194                   vol = (iv + mAILATopLevel)/2.0;
2195                   wxPrintf("\tTruncated vol:%f\n", vol);
2196                }
2197                Px_SetInputVolume(mPortMixer, vol);
2198                auto msg = XO(
2199 "Automated Recording Level Adjustment increased the volume to %.2f.")
2200                   .Format( vol );
2201                ProjectStatus::Get( *proj ).Set(msg);
2202                changetype = 2;
2203                wxPrintf("\tnew vol:%f\n", vol);
2204                float check = Px_GetInputVolume(mPortMixer);
2205                wxPrintf("\tverified %f\n", check);
2206             }
2207          }
2208 
2209          mAILAAnalysisCounter++;
2210          //const PaStreamInfo* info = Pa_GetStreamInfo(mPortStreamV19);
2211          //double latency = 0.0;
2212          //if (info)
2213          //   latency = info->inputLatency;
2214          //mAILAAnalysisEndTime = mTime+latency;
2215          mAILAAnalysisEndTime = Pa_GetStreamTime(mPortStreamV19) - mAILAAbsolutStartTime;
2216          mAILAMax             = 0;
2217          wxPrintf("\tA decision was made @ %f\n", mAILAAnalysisEndTime);
2218          mAILAClipped         = false;
2219          mAILALastStartTime   = mPlaybackSchedule.GetTrackTime();
2220 
2221          if (changetype == 0)
2222             mAILAChangeFactor *= 0.8; //time factor
2223          else if (mAILALastChangeType == changetype)
2224             mAILAChangeFactor *= 1.1; //concordance factor
2225          else
2226             mAILAChangeFactor *= 0.7; //discordance factor
2227          mAILALastChangeType = changetype;
2228          putchar('\n');
2229       }
2230 
2231       if (mAILAActive && mAILATotalAnalysis != 0 && mAILAAnalysisCounter >= mAILATotalAnalysis) {
2232          mAILAActive = false;
2233          if (mAILAMax > mAILAGoalPoint + mAILAGoalDelta)
2234             ProjectStatus::Get( *proj ).Set(
2235                XO(
2236 "Automated Recording Level Adjustment stopped. The total number of analyses has been exceeded without finding an acceptable volume. Still too high.") );
2237          else if (mAILAMax < mAILAGoalPoint - mAILAGoalDelta)
2238             ProjectStatus::Get( *proj ).Set(
2239                XO(
2240 "Automated Recording Level Adjustment stopped. The total number of analyses has been exceeded without finding an acceptable volume. Still too low.") );
2241          else {
2242             auto msg = XO(
2243 "Automated Recording Level Adjustment stopped. %.2f seems an acceptable volume.")
2244                .Format( Px_GetInputVolume(mPortMixer) );
2245             ProjectStatus::Get( *proj ).Set(msg);
2246          }
2247       }
2248    }
2249 }
2250 #endif
2251 
2252 #define MAX(a,b) ((a) > (b) ? (a) : (b))
2253 
2254 static void DoSoftwarePlaythrough(constSamplePtr inputBuffer,
2255                                   sampleFormat inputFormat,
2256                                   unsigned inputChannels,
2257                                   float *outputBuffer,
2258                                   unsigned long len)
2259 {
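   // The device delivers interleaved input frames (e.g. L0 R0 L1 R1 ... for
   // stereo).  Convert each input channel to float and write it, still
   // interleaved, into the stereo output buffer (output stride of 2).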
2260    for (unsigned int i=0; i < inputChannels; i++) {
2261       auto inputPtr = inputBuffer + (i * SAMPLE_SIZE(inputFormat));
2262 
2263       SamplesToFloats(inputPtr, inputFormat,
2264          outputBuffer + i, len, inputChannels, 2);
2265    }
2266 
2267    // One mono input channel goes to both output channels...
2268    if (inputChannels == 1)
2269       for (unsigned long i = 0; i < len; i++)
2270          outputBuffer[2*i + 1] = outputBuffer[2*i];
2271 }
2272 
2273 int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
2274                           unsigned long framesPerBuffer,
2275                           const PaStreamCallbackTimeInfo *timeInfo,
2276                           const PaStreamCallbackFlags statusFlags, void *userData )
2277 {
2278    auto gAudioIO = AudioIO::Get();
2279    return gAudioIO->AudioCallback(
2280       static_cast<constSamplePtr>(inputBuffer),
2281       static_cast<float*>(outputBuffer), framesPerBuffer,
2282       timeInfo, statusFlags, userData);
2283 }
2284 
2285 // Stop recording if 'silence' is detected
2286 // Start recording if sound detected.
2287 //
2288 //   By using CallAfter(), we can schedule the call to the toolbar
2289 //   to run in the main GUI thread after the next event loop iteration.
2290 //   That's important, because Pause() updates GUI, such as status bar,
2291 //   and that should NOT happen in this audio non-gui thread.
2292 void AudioIoCallback::CheckSoundActivatedRecordingLevel(
2293       float *inputSamples,
2294       unsigned long framesPerBuffer
2295    )
2296 {
2297    // Quick returns if next to nothing to do.
2298    if( !mPauseRec )
2299       return;
2300 
2301    float maxPeak = 0.;
2302    for( unsigned long i = 0, cnt = framesPerBuffer * mNumCaptureChannels; i < cnt; ++i ) {
2303       float sample = fabs(*(inputSamples++));
2304       if (sample > maxPeak) {
2305          maxPeak = sample;
2306       }
2307    }
2308 
2309    bool bShouldBePaused = maxPeak < mSilenceLevel;
2310    if( bShouldBePaused != IsPaused() )
2311    {
2312       auto pListener = GetListener();
2313       if ( pListener )
2314          pListener->OnSoundActivationThreshold();
2315    }
2316 }
2317 
2318 // A function to apply the requested gain, fading up or down from the
2319 // most recently applied gain.
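// When micro-fades are enabled, the gain is ramped linearly from the last
// applied value to the new one across this buffer, which avoids abrupt
// volume jumps when gain, mute, solo or pause state changes mid-stream.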
2320 void AudioIoCallback::AddToOutputChannel( unsigned int chan,
2321    float * outputMeterFloats,
2322    float * outputFloats,
2323    const float * tempBuf,
2324    bool drop,
2325    unsigned long len,
2326    WaveTrack *vt
2327    )
2328 {
2329    const auto numPlaybackChannels = mNumPlaybackChannels;
2330 
2331    float gain = vt->GetChannelGain(chan);
2332    if (drop || mForceFadeOut.load(std::memory_order_relaxed) || mPaused)
2333       gain = 0.0;
2334 
2335    // Output volume emulation: possibly copy meter samples, then
2336    // apply volume, then copy to the output buffer
2337    if (outputMeterFloats != outputFloats)
2338       for ( unsigned i = 0; i < len; ++i)
2339          outputMeterFloats[numPlaybackChannels*i+chan] +=
2340             gain*tempBuf[i];
2341 
2342    // DV: We use gain to emulate panning.
2343    // Let's keep the old behavior for panning.
2344    gain *= ExpGain(mMixerOutputVol);
2345 
2346    float oldGain = vt->GetOldChannelGain(chan);
2347    if( gain != oldGain )
2348       vt->SetOldChannelGain(chan, gain);
2349    // if no microfades, jump in volume.
2350    if( !mbMicroFades )
2351       oldGain = gain;
2352    wxASSERT(len > 0);
2353 
2354    // Linear interpolate.
2355    float deltaGain = (gain - oldGain) / len;
2356    for (unsigned i = 0; i < len; i++)
2357       outputFloats[numPlaybackChannels*i+chan] += (oldGain + deltaGain * i) *tempBuf[i];
2358 }
2359 
2360 // Limit values to -1.0..+1.0
2361 void ClampBuffer(float * pBuffer, unsigned long len){
2362    for(unsigned i = 0; i < len; i++)
2363       pBuffer[i] = wxClip( pBuffer[i], -1.0f, 1.0f);
2364 }
2365 
2366 
2367 // return true, IFF we have fully handled the callback.
2368 //
2369 // Mix and copy to PortAudio's output buffer
2370 // from our intermediate playback buffers
2371 //
2372 bool AudioIoCallback::FillOutputBuffers(
2373    float *outputBuffer,
2374    unsigned long framesPerBuffer,
2375    float *outputMeterFloats
2376 )
2377 {
2378    const auto numPlaybackTracks = mPlaybackTracks.size();
2379    const auto numPlaybackChannels = mNumPlaybackChannels;
2380    const auto numCaptureChannels = mNumCaptureChannels;
2381 
2382    mMaxFramesOutput = 0;
2383 
2384    // Quick returns if next to nothing to do.
2385    if (mStreamToken <= 0 ||
2386        !outputBuffer ||
2387        numPlaybackChannels <= 0) {
2388       // So that UpdateTimePosition() will be correct, in case of MIDI play with
2389       // no audio output channels
2390       mMaxFramesOutput = framesPerBuffer;
2391       return false;
2392    }
2393 
2394    float *outputFloats = outputBuffer;
2395 
2396    if (mSeek && !mPlaybackSchedule.GetPolicy().AllowSeek(mPlaybackSchedule))
2397       mSeek = 0.0;
2398 
2399    if (mSeek){
2400       mCallbackReturn = CallbackDoSeek();
2401       return true;
2402    }
2403 
2404    // ------ MEMORY ALLOCATION ----------------------
2405    // These are small structures.
2406    WaveTrack **chans = (WaveTrack **) alloca(numPlaybackChannels * sizeof(WaveTrack *));
2407    float **tempBufs = (float **) alloca(numPlaybackChannels * sizeof(float *));
2408 
2409    // And these are larger structures....
2410    for (unsigned int c = 0; c < numPlaybackChannels; c++)
2411       tempBufs[c] = (float *) alloca(framesPerBuffer * sizeof(float));
2412    // ------ End of MEMORY ALLOCATION ---------------
2413 
2414    auto & em = RealtimeEffectManager::Get();
2415    em.RealtimeProcessStart();
2416 
2417    bool selected = false;
2418    int group = 0;
2419    int chanCnt = 0;
2420 
2421    // Choose a common size to take from all ring buffers
2422    const auto toGet =
2423       std::min<size_t>(framesPerBuffer, GetCommonlyReadyPlayback());
2424 
2425    // The drop and dropQuickly booleans are so named for historical reasons.
2426    // JKC: The original code attempted to be faster by doing nothing on silenced audio.
2427    // This, IMHO, is 'premature optimisation'.  Instead clearer and cleaner code would
2428    // simply use a gain of 0.0 for silent audio and go on through to the stage of
2429    // applying that 0.0 gain to the data mixed into the buffer.
2430    // Then (and only then) we would have if needed fast paths for:
2431    // - Applying a uniform gain of 0.0.
2432    // - Applying a uniform gain of 1.0.
2433    // - Applying some other uniform gain.
2434    // - Applying a linearly interpolated gain.
2435    // I would expect us not to need the fast paths, since linearly interpolated gain
2436    // is very cheap to process.
2437 
2438    bool drop = false;        // Track should become silent.
2439    bool dropQuickly = false; // Track has already been faded to silence.
2440    for (unsigned t = 0; t < numPlaybackTracks; t++)
2441    {
2442       WaveTrack *vt = mPlaybackTracks[t].get();
2443       chans[chanCnt] = vt;
2444 
2445       // TODO: more-than-two-channels
2446       auto nextTrack =
2447          t + 1 < numPlaybackTracks
2448             ? mPlaybackTracks[t + 1].get()
2449             : nullptr;
2450 
2451       // First and last channel in this group (for example left and right
2452       // channels of stereo).
2453       bool firstChannel = vt->IsLeader();
2454       bool lastChannel = !nextTrack || nextTrack->IsLeader();
2455 
2456       if ( firstChannel )
2457       {
2458          selected = vt->GetSelected();
2459          // IF mono THEN clear 'the other' channel.
2460          if ( lastChannel && (numPlaybackChannels>1)) {
2461             // TODO: more-than-two-channels
2462             memset(tempBufs[1], 0, framesPerBuffer * sizeof(float));
2463          }
2464          drop = TrackShouldBeSilent( *vt );
2465          dropQuickly = drop;
2466       }
2467 
2468       if( mbMicroFades )
2469          dropQuickly = dropQuickly && TrackHasBeenFadedOut( *vt );
2470 
2471       decltype(framesPerBuffer) len = 0;
2472 
2473       if (dropQuickly)
2474       {
2475          len = mPlaybackBuffers[t]->Discard(toGet);
2476          // keep going here.
2477          // we may still need to issue a paComplete.
2478       }
2479       else
2480       {
2481          len = mPlaybackBuffers[t]->Get((samplePtr)tempBufs[chanCnt],
2482                                                    floatSample,
2483                                                    toGet);
2484          // wxASSERT( len == toGet );
2485          if (len < framesPerBuffer)
2486             // This used to happen normally at the end of non-looping
2487             // plays, but it can also be an anomalous case where the
2488             // supply from TrackBufferExchange fails to keep up with the
2489             // real-time demand in this thread (see bug 1932).  We
2490             // must supply something to the sound card, so pad it with
2491             // zeroes and not random garbage.
2492             memset((void*)&tempBufs[chanCnt][len], 0,
2493                (framesPerBuffer - len) * sizeof(float));
2494          chanCnt++;
2495       }
2496 
2497       // PRL:  Bug1104:
2498       // There can be a difference of len in different loop passes if one channel
2499       // of a stereo track ends before the other!  Take a max!
2500 
2501       // PRL:  More recent rewrites of TrackBufferExchange should guarantee a
2502       // padding out of the ring buffers so that equal lengths are
2503       // available, so maxLen ought to increase from 0 only once
2504       mMaxFramesOutput = std::max(mMaxFramesOutput, len);
2505 
2506       if ( !lastChannel )
2507          continue;
2508 
2509       // Last channel of a track seen now
2510       len = mMaxFramesOutput;
2511 
2512       if( !dropQuickly && selected )
2513          len = em.RealtimeProcess(group, chanCnt, tempBufs, len);
2514       group++;
2515 
2516       CallbackCheckCompletion(mCallbackReturn, len);
2517       if (dropQuickly) // no samples to process, they've been discarded
2518          continue;
2519 
2520       // Our channels aren't silent.  We need to pass their data on.
2521       //
2522       // Note that there are two kinds of channel count.
2523       // c and chanCnt are counting channels in the Tracks.
2524       // chan (and numPlayBackChannels) is counting output channels on the device.
2525       // chan = 0 is left channel
2526       // chan = 1 is right channel.
2527       //
2528       // Each channel in the tracks can output to more than one channel on the device.
2529       // For example mono channels output to both left and right output channels.
2530       if (len > 0) for (int c = 0; c < chanCnt; c++)
2531       {
2532          vt = chans[c];
2533 
2534          if (vt->GetChannelIgnoringPan() == Track::LeftChannel ||
2535                vt->GetChannelIgnoringPan() == Track::MonoChannel )
2536             AddToOutputChannel( 0, outputMeterFloats, outputFloats,
2537                tempBufs[c], drop, len, vt);
2538 
2539          if (vt->GetChannelIgnoringPan() == Track::RightChannel ||
2540                vt->GetChannelIgnoringPan() == Track::MonoChannel  )
2541             AddToOutputChannel( 1, outputMeterFloats, outputFloats,
2542                tempBufs[c], drop, len, vt);
2543       }
2544 
2545       chanCnt = 0;
2546    }
2547 
2548    // Poke: If there are no playback tracks, then the earlier check
2549    // about the time indicator being past the end won't happen;
2550    // do it here instead (but not if looping or scrubbing)
2551    // PRL:  Also consume from the single playback ring buffer
2552    if (numPlaybackTracks == 0) {
2553       mMaxFramesOutput = mPlaybackBuffers[0]->Discard(toGet);
2554       CallbackCheckCompletion(mCallbackReturn, 0);
2555    }
2556 
2557    // wxASSERT( maxLen == toGet );
2558 
2559    em.RealtimeProcessEnd();
2560    mLastPlaybackTimeMillis = ::wxGetUTCTimeMillis();
2561 
2562    ClampBuffer( outputFloats, framesPerBuffer*numPlaybackChannels );
2563    if (outputMeterFloats != outputFloats)
2564       ClampBuffer( outputMeterFloats, framesPerBuffer*numPlaybackChannels );
2565 
2566    return false;
2567 }
2568 
2569 void AudioIoCallback::UpdateTimePosition(unsigned long framesPerBuffer)
2570 {
2571    // Quick returns if next to nothing to do.
2572    if (mStreamToken <= 0)
2573       return;
2574 
2575    // Update the position seen by drawing code
2576    mPlaybackSchedule.SetTrackTime(
2577       mPlaybackSchedule.mTimeQueue.Consumer( mMaxFramesOutput, mRate ) );
2578 }
2579 
2580 // Copy from PortAudio input buffers to our intermediate recording buffers.
2581 //
2584 void AudioIoCallback::DrainInputBuffers(
2585    constSamplePtr inputBuffer,
2586    unsigned long framesPerBuffer,
2587    const PaStreamCallbackFlags statusFlags,
2588    float * tempFloats
2589 )
2590 {
2591    const auto numPlaybackTracks = mPlaybackTracks.size();
2592    const auto numPlaybackChannels = mNumPlaybackChannels;
2593    const auto numCaptureChannels = mNumCaptureChannels;
2594 
2595    // Quick returns if next to nothing to do.
2596    if (mStreamToken <= 0)
2597       return;
2598    if( !inputBuffer )
2599       return;
2600    if( numCaptureChannels <= 0 )
2601       return;
2602 
2603    // If there are no playback tracks, and we are recording, then the
2604    // earlier checks for being past the end won't happen, so do it here.
2605    if (mPlaybackSchedule.GetPolicy().Done(mPlaybackSchedule, 0)) {
2606       mCallbackReturn = paComplete;
2607    }
2608 
2609    // The error likely from a too-busy CPU falling behind real-time data
2610    // is paInputOverflow
2611    bool inputError =
2612       (statusFlags & (paInputOverflow))
2613       && !(statusFlags & paPrimingOutput);
2614 
2615    // But it seems easy to get false positives, at least on Mac,
2616    // so this extra detection has not been enabled in production yet.
2618 
2619    size_t len = framesPerBuffer;
2620    for(unsigned t = 0; t < numCaptureChannels; t++)
2621       len = std::min( len, mCaptureBuffers[t]->AvailForPut() );
2622 
2623    if (mSimulateRecordingErrors && 100LL * rand() < RAND_MAX)
2624       // Make spurious errors for purposes of testing the error
2625       // reporting
2626       len = 0;
2627 
2628    // A different symptom is that len < framesPerBuffer because
2629    // the other thread, executing TrackBufferExchange, isn't consuming fast
2630    // enough from mCaptureBuffers; maybe it's CPU-bound, or maybe the
2631    // storage device it writes is too slow
2632    if (mDetectDropouts &&
2633          ((mDetectUpstreamDropouts && inputError) ||
2634          len < framesPerBuffer) ) {
2635       // Assume that any good partial buffer should be written leftmost
2636       // and zeroes will be padded after; label the zeroes.
2637       auto start = mPlaybackSchedule.GetTrackTime() +
2638             len / mRate + mRecordingSchedule.mLatencyCorrection;
2639       auto duration = (framesPerBuffer - len) / mRate;
2640       auto pLast = mLostCaptureIntervals.empty()
2641          ? nullptr : &mLostCaptureIntervals.back();
2642       if (pLast &&
2643           fabs(pLast->first + pLast->second - start) < 0.5/mRate)
2644          // Make one bigger interval, not two butting intervals
2645          pLast->second = start + duration - pLast->first;
2646       else
2647          mLostCaptureIntervals.emplace_back( start, duration );
2648    }
2649 
2650    if (len < framesPerBuffer)
2651    {
2652       mLostSamples += (framesPerBuffer - len);
2653       wxPrintf(wxT("lost %d samples\n"), (int)(framesPerBuffer - len));
2654    }
2655 
2656    if (len <= 0)
2657       return;
2658 
2659    // We have an ASSERT in the AudioIO constructor to alert us to
2660    // possible issues with the (short*) cast.  We'd have a problem if
2661    // sizeof(short) > sizeof(float) since our buffers are sized for floats.
2662    for(unsigned t = 0; t < numCaptureChannels; t++) {
2663 
2664       // dmazzoni:
2665       // Un-interleave.  Ugly special-case code required because the
2666       // capture channels could be in three different sample formats;
2667       // it'd be nice to be able to call CopySamples, but it can't
2668       // handle multiplying by the gain and then clipping.  Bummer.
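      // The input is interleaved by channel, so element numCaptureChannels*i + t
      // is frame i of capture channel t; the cases below copy that channel into
      // a contiguous temp buffer in mCaptureFormat.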
2669 
2670       switch(mCaptureFormat) {
2671          case floatSample: {
2672             auto inputFloats = (const float *)inputBuffer;
2673             for(unsigned i = 0; i < len; i++)
2674                tempFloats[i] =
2675                   inputFloats[numCaptureChannels*i+t];
2676          } break;
2677          case int24Sample:
2678             // We should never get here. Audacity's int24Sample format
2679             // is different from PortAudio's sample format and so we
2680             // make PortAudio return float samples when recording in
2681             // 24-bit samples.
2682             wxASSERT(false);
2683             break;
2684          case int16Sample: {
2685             auto inputShorts = (const short *)inputBuffer;
2686             short *tempShorts = (short *)tempFloats;
2687             for( unsigned i = 0; i < len; i++) {
2688                float tmp = inputShorts[numCaptureChannels*i+t];
2689                tmp = wxClip( tmp, -32768, 32767 ); // wxClip takes (value, min, max)
2690                tempShorts[i] = (short)(tmp);
2691             }
2692          } break;
2693       } // switch
2694 
2695       // JKC: mCaptureFormat must be for samples with sizeof(float) or
2696       // fewer bytes (because tempFloats is sized for floats).  All
2697       // formats are 2 or 4 bytes, so we are OK.
2698       const auto put =
2699          mCaptureBuffers[t]->Put(
2700             (samplePtr)tempFloats, mCaptureFormat, len);
2701       // wxASSERT(put == len);
2702       // but we can't assert in this thread
2703       wxUnusedVar(put);
2704    }
2705 }
2706 
2707 
2708 #if 0
2709 // Record the reported latency from PortAudio.
2710 // TODO: Don't recalculate this with every callback?
2711 // 01/21/2009:  Disabled until a better solution presents itself.
2712 void OldCodeToCalculateLatency()
2713 {
2714    // As of 06/17/2006, portaudio v19 returns inputBufferAdcTime set to
2715    // zero.  It is being worked on, but for now we just can't do much
2716    // but follow the leader.
2717    //
2718    // 08/27/2006: too inconsistent for now...just leave it a zero.
2719    //
2720    // 04/16/2008: Looks like si->inputLatency comes back with something useful though.
2721    // This rearranged logic uses si->inputLatency, but if PortAudio fixes inputBufferAdcTime,
2722    // this code won't have to be modified to use it.
2723    // Also avoids setting mLastRecordingOffset except when simultaneously playing and recording.
2724    //
2725    if (numCaptureChannels > 0 && numPlaybackChannels > 0) // simultaneously playing and recording
2726    {
2727       if (timeInfo->inputBufferAdcTime > 0)
2728          mLastRecordingOffset = timeInfo->inputBufferAdcTime - timeInfo->outputBufferDacTime;
2729       else if (mLastRecordingOffset == 0.0)
2730       {
2731          const PaStreamInfo* si = Pa_GetStreamInfo( mPortStreamV19 );
2732          mLastRecordingOffset = -si->inputLatency;
2733       }
2734    }
2735 }
2736 #endif
2737 
2738 
2739 // Prime the output buffer with 0's, optionally adding in the playthrough.
2741 void AudioIoCallback::DoPlaythrough(
2742       constSamplePtr inputBuffer,
2743       float *outputBuffer,
2744       unsigned long framesPerBuffer,
2745       float *outputMeterFloats
2746    )
2747 {
2748    const auto numCaptureChannels = mNumCaptureChannels;
2749    const auto numPlaybackChannels = mNumPlaybackChannels;
2750 
2751    // Quick returns if next to nothing to do.
2752    if( !outputBuffer )
2753       return;
2754    if( numPlaybackChannels <= 0 )
2755       return;
2756 
2757    float *outputFloats = outputBuffer;
2758    for(unsigned i = 0; i < framesPerBuffer*numPlaybackChannels; i++)
2759       outputFloats[i] = 0.0;
2760 
2761    if (inputBuffer && mSoftwarePlaythrough) {
2762       DoSoftwarePlaythrough(inputBuffer, mCaptureFormat,
2763                               numCaptureChannels,
2764                               outputBuffer, framesPerBuffer);
2765    }
2766 
2767    // Copy the results to outputMeterFloats if necessary
2768    if (outputMeterFloats != outputFloats) {
2769       for (unsigned i = 0; i < framesPerBuffer*numPlaybackChannels; ++i) {
2770          outputMeterFloats[i] = outputFloats[i];
2771       }
2772    }
2773 }
2774 
2775 /* Send data to recording VU meter if applicable */
2776 // Also computes rms
2777 void AudioIoCallback::SendVuInputMeterData(
2778    const float *inputSamples,
2779    unsigned long framesPerBuffer
2780    )
2781 {
2782    const auto numCaptureChannels = mNumCaptureChannels;
2783 
2784    auto pInputMeter = mInputMeter.lock();
2785    if ( !pInputMeter )
2786       return;
2787    if( pInputMeter->IsMeterDisabled())
2788       return;
2789 
2790    // Get here if the meters are actually live and being updated
2791    /* It's critical that we don't update the meters while StopStream is
2792       * trying to stop PortAudio, otherwise it can lead to a freeze.  We use
2793       * two variables to synchronize:
2794       *   mUpdatingMeters tells StopStream when the callback is about to enter
2795       *     the code where it might update the meters, and
2796       *   mUpdateMeters is how the rest of the code tells the callback when it
2797       *     is allowed to actually do the updating.
2798       * Note that mUpdatingMeters must be set first to avoid a race condition.
2799       */
2800    //TODO use atomics instead.
2801    mUpdatingMeters = true;
2802    if (mUpdateMeters) {
2803          pInputMeter->UpdateDisplay(numCaptureChannels,
2804                                     framesPerBuffer,
2805                                     inputSamples);
2806    }
2807    mUpdatingMeters = false;
2808 }
2809 
2810 /* Send data to playback VU meter if applicable */
2811 void AudioIoCallback::SendVuOutputMeterData(
2812    const float *outputMeterFloats,
2813    unsigned long framesPerBuffer)
2814 {
2815    const auto numPlaybackChannels = mNumPlaybackChannels;
2816 
2817    auto pOutputMeter = mOutputMeter.lock();
2818    if (!pOutputMeter)
2819       return;
2820    if( pOutputMeter->IsMeterDisabled() )
2821       return;
2822    if( !outputMeterFloats)
2823       return;
2824 
2825    // Get here if playback meter is live
2826    /* It's critical that we don't update the meters while StopStream is
2827       * trying to stop PortAudio, otherwise it can lead to a freeze.  We use
2828       * two variables to synchronize:
2829       *  mUpdatingMeters tells StopStream when the callback is about to enter
2830       *    the code where it might update the meters, and
2831       *  mUpdateMeters is how the rest of the code tells the callback when it
2832       *    is allowed to actually do the updating.
2833       * Note that mUpdatingMeters must be set first to avoid a race condition.
2834       */
2835    mUpdatingMeters = true;
2836    if (mUpdateMeters) {
2837       pOutputMeter->UpdateDisplay(numPlaybackChannels,
2838                                              framesPerBuffer,
2839                                              outputMeterFloats);
2840 
2841       //v Vaughan, 2011-02-25: Moved this update back to TrackPanel::OnTimer()
2842       //    as it helps with playback issues reported by Bill and noted on Bug 258.
2843       //    The problem there occurs if Software Playthrough is on.
2844       //    Could conditionally do the update here if Software Playthrough is off,
2845       //    and in TrackPanel::OnTimer() if Software Playthrough is on, but not now.
2846       // PRL 12 Jul 2015: and what was in TrackPanel::OnTimer is now handled by means of event
2847       // type EVT_TRACK_PANEL_TIMER
2848       //MixerBoard* pMixerBoard = mOwningProject->GetMixerBoard();
2849       //if (pMixerBoard)
2850       //   pMixerBoard->UpdateMeters(GetStreamTime(),
2851       //                              (pProj->GetControlToolBar()->GetLastPlayMode() == loopedPlay));
2852    }
2853    mUpdatingMeters = false;
2854 }
2855 
unsigned AudioIoCallback::CountSoloingTracks()
{
   const auto numPlaybackTracks = mPlaybackTracks.size();

   // MOVE_TO: CountSoloedTracks() function
   unsigned numSolo = 0;
   for(unsigned t = 0; t < numPlaybackTracks; t++ )
      if( mPlaybackTracks[t]->GetSolo() )
         numSolo++;
   auto range = Extensions();
   numSolo += std::accumulate(range.begin(), range.end(), 0,
      [](unsigned sum, auto &ext){
         return sum + ext.CountOtherSoloTracks(); });
   return numSolo;
}
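
// Illustrative only, not part of the build: the per-track loop above is
// equivalent to a std::count_if over mPlaybackTracks, which the suggested
// CountSoloedTracks() helper (see the MOVE_TO note) might use:
//
//    auto numSolo = std::count_if(
//       mPlaybackTracks.begin(), mPlaybackTracks.end(),
//       [](const auto &pTrack){ return pTrack->GetSolo(); });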

// TODO: Consider making the two Track status functions into functions of
// WaveTrack.

// true IFF the track should be silent.
// The track may not yet be silent, since it may still be
// fading out.
bool AudioIoCallback::TrackShouldBeSilent( const WaveTrack &wt )
{
   return mPaused || (!wt.GetSolo() && (
      // Cut if somebody else is soloing
      mbHasSoloTracks ||
      // Cut if we're muted (and not soloing)
      wt.GetMute()
   ));
}

// This is about micro-fades: true when the old channel gains have already
// reached zero, i.e. the fade-out is complete.
bool AudioIoCallback::TrackHasBeenFadedOut( const WaveTrack &wt )
{
   const auto channel = wt.GetChannelIgnoringPan();
   if ((channel == Track::LeftChannel  || channel == Track::MonoChannel) &&
      wt.GetOldChannelGain(0) != 0.0)
      return false;
   if ((channel == Track::RightChannel || channel == Track::MonoChannel) &&
      wt.GetOldChannelGain(1) != 0.0)
      return false;
   return true;
}

bool AudioIoCallback::AllTracksAlreadySilent()
{
   const bool dropAllQuickly = std::all_of(
      mPlaybackTracks.begin(), mPlaybackTracks.end(),
      [&]( const std::shared_ptr< WaveTrack > &vt ){
         return TrackShouldBeSilent( *vt ) && TrackHasBeenFadedOut( *vt );
      }
   );
   return dropAllQuickly;
}

AudioIoCallback::AudioIoCallback()
{
   auto &factories = AudioIOExt::GetFactories();
   for (auto &factory: factories)
      if (auto pExt = factory(mPlaybackSchedule))
         mAudioIOExt.push_back( std::move(pExt) );
}


AudioIoCallback::~AudioIoCallback()
{
}

int AudioIoCallback::AudioCallback(
   constSamplePtr inputBuffer, float *outputBuffer,
   unsigned long framesPerBuffer,
   const PaStreamCallbackTimeInfo *timeInfo,
   const PaStreamCallbackFlags statusFlags, void * WXUNUSED(userData) )
{
   // Poll tracks for change of state.  User might click mute and solo buttons.
   mbHasSoloTracks = CountSoloingTracks() > 0;
   mCallbackReturn = paContinue;

   if (IsPaused()
       // PRL:  Why was this added?  Was it only because of the mysterious
       // initial leading zeroes, now solved by setting mStreamToken early?
       // JKC: I think it's used for the MIDI time cursor.  See comments
       // at head of file about AudioTime().
       || mStreamToken <= 0
       )
      mNumPauseFrames += framesPerBuffer;

   for( auto &ext : Extensions() ) {
      ext.ComputeOtherTimings(mRate,
         timeInfo,
         framesPerBuffer);
      ext.FillOtherBuffers(
         mRate, mNumPauseFrames, IsPaused(), mbHasSoloTracks);
   }

   // ------ MEMORY ALLOCATIONS -----------------------------------------------
   // tempFloats will be a reusable scratch pad for (possibly format converted)
   // audio data.  One temporary use is for the InputMeter data.
   const auto numPlaybackChannels = mNumPlaybackChannels;
   const auto numCaptureChannels = mNumCaptureChannels;
   float *tempFloats = (float *)alloca(framesPerBuffer*sizeof(float)*
                             MAX(numCaptureChannels,numPlaybackChannels));

   bool bVolEmulationActive =
      (outputBuffer && mMixerOutputVol != 1.0);
   // outputMeterFloats is the scratch pad for the output meter.
   // We can often reuse the existing outputBuffer and save on allocating
   // something new.
   float *outputMeterFloats = bVolEmulationActive ?
         (float *)alloca(framesPerBuffer*numPlaybackChannels * sizeof(float)) :
         outputBuffer;
   // ----- END of MEMORY ALLOCATIONS ------------------------------------------

   if (inputBuffer && numCaptureChannels) {
      float *inputSamples;

      if (mCaptureFormat == floatSample) {
         inputSamples = (float *) inputBuffer;
      }
      else {
         SamplesToFloats(reinterpret_cast<constSamplePtr>(inputBuffer),
            mCaptureFormat, tempFloats, framesPerBuffer * numCaptureChannels);
         inputSamples = tempFloats;
      }

      SendVuInputMeterData(
         inputSamples,
         framesPerBuffer);

      // This function may queue up a pause or resume.
      // TODO: this is a bit dodgy as it toggles the Pause, and
      // relies on an idle event to have handled that, so could
      // queue up multiple toggle requests and so do nothing.
      // Eventually it will sort itself out by random luck, but
      // the net effect is a delay in starting/stopping sound activated
      // recording.
      CheckSoundActivatedRecordingLevel(
         inputSamples,
         framesPerBuffer);
   }

   // Even when paused, we do playthrough.
   // Initialise output buffer to zero or to playthrough data.
   // Initialise output meter values.
   DoPlaythrough(
      inputBuffer,
      outputBuffer,
      framesPerBuffer,
      outputMeterFloats);

   // Test for no track audio to play (because we are paused and have faded out)
   if( mPaused && ( !mbMicroFades || AllTracksAlreadySilent() ))
      return mCallbackReturn;

   // To add track output to output (to play sound on speaker)
   // possible exit, if we were seeking.
   if( FillOutputBuffers(
         outputBuffer,
         framesPerBuffer,
         outputMeterFloats))
      return mCallbackReturn;

   // To move the cursor onwards.  (uses mMaxFramesOutput)
   UpdateTimePosition(framesPerBuffer);

   // To capture input into track (sound from microphone)
   DrainInputBuffers(
      inputBuffer,
      framesPerBuffer,
      statusFlags,
      tempFloats);

   SendVuOutputMeterData( outputMeterFloats, framesPerBuffer);

   return mCallbackReturn;
}

int AudioIoCallback::CallbackDoSeek()
{
   const int token = mStreamToken;
   wxMutexLocker locker(mSuspendAudioThread);
   if (token != mStreamToken)
      // This stream got destroyed while we waited for it
      return paAbort;

   const auto numPlaybackTracks = mPlaybackTracks.size();

   // Pause audio thread and wait for it to finish
   mAudioThreadTrackBufferExchangeLoopRunning = false;
   while( mAudioThreadTrackBufferExchangeLoopActive )
   {
      wxMilliSleep( 50 );
   }

   // Calculate the NEW time position, in the PortAudio callback
   const auto time =
      mPlaybackSchedule.GetPolicy().OffsetTrackTime( mPlaybackSchedule, mSeek );

   mPlaybackSchedule.SetTrackTime( time );
   mSeek = 0.0;

   // Reset mixer positions and flush buffers for all tracks
   for (size_t i = 0; i < numPlaybackTracks; i++)
   {
      const bool skipping = true;
      mPlaybackMixers[i]->Reposition( time, skipping );
      const auto toDiscard =
         mPlaybackBuffers[i]->AvailForGet();
      const auto discarded =
         mPlaybackBuffers[i]->Discard( toDiscard );
      // wxASSERT( discarded == toDiscard );
      // but we can't assert in this thread
      wxUnusedVar(discarded);
   }

   mPlaybackSchedule.mTimeQueue.Prime(time);

   // Reload the ring buffers
   mAudioThreadShouldCallTrackBufferExchangeOnce = true;
   while( mAudioThreadShouldCallTrackBufferExchangeOnce )
   {
      wxMilliSleep( 50 );
   }

   // Reenable the audio thread
   mAudioThreadTrackBufferExchangeLoopRunning = true;

   return paContinue;
}
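
// Illustrative only, not part of the build: the two polling loops above hand
// work to the Audio Thread by flipping plain bools and sleeping in 50 ms
// steps.  Assuming hypothetical members mAudioThreadMutex (std::mutex) and
// mAudioThreadCV (std::condition_variable), and assuming the Audio Thread
// cleared the flag under the same mutex and called notify_all(), the second
// handshake could instead block only until the buffer reload is done:
//
//    std::unique_lock<std::mutex> lock(mAudioThreadMutex);
//    mAudioThreadShouldCallTrackBufferExchangeOnce = true;
//    mAudioThreadCV.wait(lock, [this]{
//       return !mAudioThreadShouldCallTrackBufferExchangeOnce; });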

void AudioIoCallback::CallbackCheckCompletion(
   int &callbackReturn, unsigned long len)
{
   if (mPaused)
      return;

   bool done =
      mPlaybackSchedule.GetPolicy().Done(mPlaybackSchedule, len);
   if (!done)
      return;

   for( auto &ext : Extensions() )
      ext.SignalOtherCompletion();
   callbackReturn = paComplete;
}

auto AudioIoCallback::AudioIOExtIterator::operator *() const -> AudioIOExt &
{
   // Down-cast and dereference are safe because only AudioIoCallback
   // populates the array
   return *static_cast<AudioIOExt*>(mIterator->get());
}

bool AudioIO::IsCapturing() const
{
   // Includes a test of mTime, used in the main thread
   return IsStreamActive() &&
      GetNumCaptureChannels() > 0 &&
      mPlaybackSchedule.GetTrackTime() >=
         mPlaybackSchedule.mT0 + mRecordingSchedule.mPreRoll;
}