1 /*
2  * An example showing how to play a stream sync'd to video, using ffmpeg.
3  *
4  * Requires C++14.
5  */
6 
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <utility>
12 #include <iomanip>
13 #include <cstdint>
14 #include <cstring>
15 #include <cstdlib>
16 #include <atomic>
17 #include <cerrno>
18 #include <chrono>
19 #include <cstdio>
20 #include <memory>
21 #include <string>
22 #include <thread>
23 #include <vector>
24 #include <array>
25 #include <cmath>
26 #include <deque>
27 #include <mutex>
28 #include <ratio>
29 
30 extern "C" {
31 #ifdef __GNUC__
32 _Pragma("GCC diagnostic push")
33 _Pragma("GCC diagnostic ignored \"-Wconversion\"")
34 _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
35 #endif
36 #include "libavcodec/avcodec.h"
37 #include "libavformat/avformat.h"
38 #include "libavformat/avio.h"
39 #include "libavformat/version.h"
40 #include "libavutil/avutil.h"
41 #include "libavutil/error.h"
42 #include "libavutil/frame.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/pixfmt.h"
45 #include "libavutil/rational.h"
46 #include "libavutil/samplefmt.h"
47 #include "libavutil/time.h"
48 #include "libavutil/version.h"
49 #include "libavutil/channel_layout.h"
50 #include "libswscale/swscale.h"
51 #include "libswresample/swresample.h"
52 
53 constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
54 constexpr auto AVErrorEOF = AVERROR_EOF;
55 
56 struct SwsContext;
57 #ifdef __GNUC__
58 _Pragma("GCC diagnostic pop")
59 #endif
60 }
61 
62 #include "SDL.h"
63 
64 #include "AL/alc.h"
65 #include "AL/al.h"
66 #include "AL/alext.h"
67 
68 #include "common/alhelpers.h"
69 
70 extern "C" {
71 /* Undefine this to disable use of experimental extensions. Don't use for
72  * production code! Interfaces and behavior may change prior to being
73  * finalized.
74  */
75 #define ALLOW_EXPERIMENTAL_EXTS
76 
77 #ifdef ALLOW_EXPERIMENTAL_EXTS
78 #ifndef AL_SOFT_callback_buffer
79 #define AL_SOFT_callback_buffer
80 typedef unsigned int ALbitfieldSOFT;
81 #define AL_BUFFER_CALLBACK_FUNCTION_SOFT         0x19A0
82 #define AL_BUFFER_CALLBACK_USER_PARAM_SOFT       0x19A1
83 typedef ALsizei (AL_APIENTRY*LPALBUFFERCALLBACKTYPESOFT)(ALvoid *userptr, ALvoid *sampledata, ALsizei numsamples);
84 typedef void (AL_APIENTRY*LPALBUFFERCALLBACKSOFT)(ALuint buffer, ALenum format, ALsizei freq, LPALBUFFERCALLBACKTYPESOFT callback, ALvoid *userptr, ALbitfieldSOFT flags);
85 typedef void (AL_APIENTRY*LPALGETBUFFERPTRSOFT)(ALuint buffer, ALenum param, ALvoid **value);
86 typedef void (AL_APIENTRY*LPALGETBUFFER3PTRSOFT)(ALuint buffer, ALenum param, ALvoid **value1, ALvoid **value2, ALvoid **value3);
87 typedef void (AL_APIENTRY*LPALGETBUFFERPTRVSOFT)(ALuint buffer, ALenum param, ALvoid **values);
88 #endif
89 #endif /* ALLOW_EXPERIMENTAL_EXTS */
90 }
91 
92 namespace {
93 
/* User-defined literal for building int64_t constants, e.g. 1_i64. */
inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
{ return static_cast<int64_t>(n); }
95 
#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif

/* 32.32 fixed-point duration, matching OpenAL's 64-bit sample offsets (whole
 * samples in the high 32 bits, fraction in the low 32 bits).
 */
using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
using seconds_d64 = std::chrono::duration<double>;
using std::chrono::duration_cast;

const std::string AppName{"alffplay"};

/* Global playback configuration flags. */
ALenum DirectOutMode{AL_FALSE};
bool EnableWideStereo{false};
bool DisableVideo{false};
/* Extension function pointers, null until loaded (checked before use). */
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;

#ifdef AL_SOFT_events
LPALEVENTCONTROLSOFT alEventControlSOFT;
LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
#endif

#ifdef AL_SOFT_callback_buffer
LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT;
#endif

/* If the audio and master clocks differ by more than this, don't try to
 * correct — assume a discontinuity and reset the averaging filter instead.
 */
const seconds AVNoSyncThreshold{10};

#define VIDEO_PICTURE_QUEUE_SIZE 24

/* Averaged audio drift below this threshold is left uncorrected. */
const seconds_d64 AudioSyncThreshold{0.03};
/* Maximum audio correction applied per update, in time. */
const milliseconds AudioSampleCorrectionMax{50};
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
/* Per-buffer size, in time */
constexpr milliseconds AudioBufferTime{20};
/* Buffer total size, in time (should be divisible by the buffer time) */
constexpr milliseconds AudioBufferTotalTime{800};
constexpr auto AudioBufferCount = AudioBufferTotalTime / AudioBufferTime;

/* Custom SDL event signaling the end of playback. */
enum {
    FF_MOVIE_DONE_EVENT = SDL_USEREVENT
};

/* Which clock the other streams synchronize to. */
enum class SyncMaster {
    Audio,
    Video,
    External,

    Default = External
};
151 
152 
/* Returns ffmpeg's current absolute time (av_gettime) as a chrono
 * microseconds duration.
 */
inline microseconds get_avtime()
{ return microseconds{av_gettime()}; }
155 
/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. Each deleter
 * calls the matching ffmpeg free/close function for the owned type.
 */
struct AVIOContextDeleter {
    void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
};
using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;

struct AVFormatCtxDeleter {
    void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
};
using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;

struct AVCodecCtxDeleter {
    void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
};
using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;

struct AVFrameDeleter {
    void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
};
using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;

struct SwrContextDeleter {
    void operator()(SwrContext *ptr) { swr_free(&ptr); }
};
using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;

struct SwsContextDeleter {
    /* sws_freeContext takes the pointer by value; there is no *_freep form. */
    void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
};
using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
186 
187 
/* A mutex-protected FIFO of demuxed packets for one stream, limited to
 * roughly SizeLimit bytes of packet data. A producer thread put()s packets
 * while a decoder thread drains them via sendTo().
 */
template<size_t SizeLimit>
class PacketQueue {
    std::mutex mMutex;
    std::condition_variable mCondVar;
    std::deque<AVPacket> mPackets;
    size_t mTotalSize{0}; /* Sum of the queued packets' data sizes, in bytes. */
    bool mFinished{false}; /* Set once no more packets will ever be queued. */

    /* Blocks until a packet is available or the queue is finished. Returns
     * null only in the finished-and-drained case. Called with mMutex held.
     */
    AVPacket *getPacket(std::unique_lock<std::mutex> &lock)
    {
        while(mPackets.empty() && !mFinished)
            mCondVar.wait(lock);
        return mPackets.empty() ? nullptr : &mPackets.front();
    }

    /* Unrefs and removes the front packet. Called with mMutex held. */
    void pop()
    {
        AVPacket *pkt = &mPackets.front();
        mTotalSize -= static_cast<unsigned int>(pkt->size);
        av_packet_unref(pkt);
        mPackets.pop_front();
    }

public:
    ~PacketQueue()
    {
        for(AVPacket &pkt : mPackets)
            av_packet_unref(&pkt);
        mPackets.clear();
        mTotalSize = 0;
    }

    /* Sends the next queued packet to the codec, popping it unless the codec
     * reports EAGAIN (it will be retried later). Once the queue is finished
     * and drained, a null packet is sent to start flushing the decoder.
     * Returns the avcodec_send_packet result.
     */
    int sendTo(AVCodecContext *codecctx)
    {
        std::unique_lock<std::mutex> lock{mMutex};

        AVPacket *pkt{getPacket(lock)};
        if(!pkt) return avcodec_send_packet(codecctx, nullptr);

        const int ret{avcodec_send_packet(codecctx, pkt)};
        if(ret != AVERROR(EAGAIN))
        {
            if(ret < 0)
                std::cerr<< "Failed to send packet: "<<ret <<std::endl;
            pop();
        }
        return ret;
    }

    /* Marks the queue as finished and wakes a consumer blocked in sendTo(). */
    void setFinished()
    {
        {
            std::lock_guard<std::mutex> _{mMutex};
            mFinished = true;
        }
        mCondVar.notify_one();
    }

    /* Queues a new reference to the given packet. Returns false only when the
     * queue is over its size limit (caller may retry later). Note that a
     * failed av_packet_ref still returns true: the packet is dropped rather
     * than retried.
     */
    bool put(const AVPacket *pkt)
    {
        {
            std::unique_lock<std::mutex> lock{mMutex};
            if(mTotalSize >= SizeLimit)
                return false;

            mPackets.push_back(AVPacket{});
            if(av_packet_ref(&mPackets.back(), pkt) != 0)
            {
                mPackets.pop_back();
                return true;
            }

            mTotalSize += static_cast<unsigned int>(mPackets.back().size);
        }
        mCondVar.notify_one();
        return true;
    }
};
266 
267 
268 struct MovieState;
269 
/* Decoding and playback state for the audio stream. Decoded samples are
 * converted with swresample, then either queued on the OpenAL source as
 * discrete buffers, or streamed through a ring buffer when the
 * callback-buffer extension is in use.
 */
struct AudioState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    PacketQueue<2*1024*1024> mPackets;

    /* Used for clock difference average computation */
    seconds_d64 mClockDiffAvg{0};

    /* Time of the next sample to be buffered */
    nanoseconds mCurrentPts{0};

    /* Device clock time that the stream started at. */
    nanoseconds mDeviceStartTime{nanoseconds::min()};

    /* Decompressed sample frame, and swresample context for conversion */
    AVFramePtr    mDecodedFrame;
    SwrContextPtr mSwresCtx;

    /* Conversion format, for what gets fed to OpenAL */
    uint64_t       mDstChanLayout{0};
    AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};

    /* Storage of converted samples */
    uint8_t *mSamples{nullptr};
    int mSamplesLen{0}; /* In samples */
    int mSamplesPos{0}; /* Negative: number of frames to duplicate for sync. */
    int mSamplesMax{0};

    /* Ring buffer for the callback-buffer path. The AL callback advances
     * mReadPos while the decode thread advances mWritePos.
     */
    std::unique_ptr<uint8_t[]> mBufferData;
    size_t mBufferDataSize{0};
    std::atomic<size_t> mReadPos{0};
    std::atomic<size_t> mWritePos{0};

    /* OpenAL format */
    ALenum mFormat{AL_NONE};
    ALuint mFrameSize{0}; /* Bytes per sample frame. */

    std::mutex mSrcMutex;
    std::condition_variable mSrcCond;
    std::atomic_flag mConnected; /* Cleared when the device disconnects. */
    ALuint mSource{0};
    std::array<ALuint,AudioBufferCount> mBuffers{};
    ALuint mBufferIdx{0};

    AudioState(MovieState &movie) : mMovie(movie)
    { mConnected.test_and_set(std::memory_order_relaxed); }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        if(mBuffers[0])
            alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());

        av_freep(&mSamples);
    }

#ifdef AL_SOFT_events
    static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
        ALsizei length, const ALchar *message, void *userParam);
#endif
#ifdef AL_SOFT_callback_buffer
    /* C trampoline forwarding the AL buffer callback to the member method. */
    static ALsizei AL_APIENTRY bufferCallbackC(void *userptr, void *data, ALsizei size)
    { return static_cast<AudioState*>(userptr)->bufferCallback(data, size); }
    ALsizei bufferCallback(void *data, ALsizei size);
#endif

    nanoseconds getClockNoLock();
    /* Thread-safe wrapper for getClockNoLock(). */
    nanoseconds getClock()
    {
        std::lock_guard<std::mutex> lock{mSrcMutex};
        return getClockNoLock();
    }

    bool startPlayback();

    int getSync();
    int decodeFrame();
    bool readAudio(uint8_t *samples, unsigned int length, int &sample_skip);
    void readAudio(int sample_skip);

    int handler();
};
355 
/* Decoding and display state for the video stream. Decoded pictures are
 * placed in a ring of Picture entries and shown via SDL.
 */
struct VideoState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    PacketQueue<14*1024*1024> mPackets;

    /* The pts of the currently displayed frame, and the time (av_gettime) it
     * was last updated - used to have running video pts
     */
    nanoseconds mDisplayPts{0};
    microseconds mDisplayPtsTime{microseconds::min()};
    std::mutex mDispPtsMutex;

    /* Swscale context for format conversion */
    SwsContextPtr mSwscaleCtx;

    struct Picture {
        AVFramePtr mFrame{};
        nanoseconds mPts{nanoseconds::min()};
    };
    /* Picture ring buffer with atomic read/write indices. */
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;

    SDL_Texture *mImage{nullptr};
    int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
    bool mFirstUpdate{true};

    std::atomic<bool> mEOS{false};
    std::atomic<bool> mFinalUpdate{false};

    VideoState(MovieState &movie) : mMovie(movie) { }
    ~VideoState()
    {
        if(mImage)
            SDL_DestroyTexture(mImage);
        mImage = nullptr;
    }

    nanoseconds getClock();

    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
    int handler();
};
404 
/* Top-level state for one playing movie: the demuxer context, the audio and
 * video stream states, and the threads driving them.
 */
struct MovieState {
    AVIOContextPtr mIOContext;
    AVFormatCtxPtr mFormatCtx;

    SyncMaster mAVSyncType{SyncMaster::Default};

    /* External clock base time. */
    microseconds mClockBase{microseconds::min()};

    std::atomic<bool> mQuit{false};

    AudioState mAudio;
    VideoState mVideo;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        mQuit = true;
        /* NOTE(review): only the parser thread is joined here; the audio and
         * video threads are presumably joined by parse_handler — confirm.
         */
        if(mParseThread.joinable())
            mParseThread.join();
    }

    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window);

    nanoseconds getClock();

    nanoseconds getMasterClock();

    nanoseconds getDuration();

    int streamComponentOpen(unsigned int stream_index);
    int parse_handler();
};
447 
448 
/* Returns the current audio playback time: the timestamp of the sample being
 * heard right now. Three strategies, in order of preference: the device clock
 * extension (ALC_DEVICE_CLOCK_LATENCY_SOFT), the callback ring buffer
 * position, or the source's buffer queue and sample offset. The caller must
 * hold mSrcMutex (see getClock()).
 */
nanoseconds AudioState::getClockNoLock()
{
    // The audio clock is the timestamp of the sample currently being heard.
    if(alcGetInteger64vSOFT)
    {
        // If device start time = min, we aren't playing yet.
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        // Get the current device clock time and latency.
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        ALCint64SOFT devtimes[2]{0,0};
        alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
        auto latency = nanoseconds{devtimes[1]};
        auto device_time = nanoseconds{devtimes[0]};

        // The clock is simply the current device time relative to the recorded
        // start time. We can also subtract the latency to get more a accurate
        // position of where the audio device actually is in the output stream.
        return device_time - mDeviceStartTime - latency;
    }

    if(mBufferDataSize > 0)
    {
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        /* With a callback buffer and no device clock, mDeviceStartTime is
         * actually the timestamp of the first sample frame played. The audio
         * clock, then, is that plus the current source offset.
         */
        ALint64SOFT offset[2];
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            /* No 64-bit offset extension; use the plain sample offset in the
             * high 32 bits (32.32 fixed-point) with zero latency.
             */
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
            offset[1] = 0;
        }
        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between getting the state and retrieving
         * the offset+latency.
         */
        ALint status;
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        nanoseconds pts{};
        if(status == AL_PLAYING || status == AL_PAUSED)
            pts = mDeviceStartTime - nanoseconds{offset[1]} +
                duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        else
        {
            /* If the source is stopped, the pts of the next sample to be heard
             * is the pts of the next sample to be buffered, minus the amount
             * already in the buffer ready to play.
             */
            const size_t woffset{mWritePos.load(std::memory_order_acquire)};
            const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
            const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
                roffset};

            pts = mCurrentPts - nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
        }

        return pts;
    }

    /* The source-based clock is based on 4 components:
     * 1 - The timestamp of the next sample to buffer (mCurrentPts)
     * 2 - The length of the source's buffer queue
     *     (AudioBufferTime*AL_BUFFERS_QUEUED)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SAMPLE_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SAMPLE_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at the start of the source
     * queue. Adding the source offset to that results in the timestamp for the
     * sample at OpenAL's current position, and subtracting the source latency
     * from that gives the timestamp of the sample currently at the DAC.
     */
    nanoseconds pts{mCurrentPts};
    if(mSource)
    {
        ALint64SOFT offset[2];
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
            offset[1] = 0;
        }
        ALint queued, status;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery.
         */
        if(status != AL_STOPPED)
        {
            pts -= AudioBufferTime*queued;
            pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        }
        /* Don't offset by the latency if the source isn't playing. */
        if(status == AL_PLAYING)
            pts -= nanoseconds{offset[1]};
    }

    return std::max(pts, nanoseconds::zero());
}
567 
/* Starts the source playing and records the device/stream time playback began
 * at, so getClockNoLock() can later compute the playback position. Returns
 * false when there's nothing buffered to play yet (empty ring buffer or no
 * queued buffers).
 */
bool AudioState::startPlayback()
{
    const size_t woffset{mWritePos.load(std::memory_order_acquire)};
    const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
    const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
        roffset};

    if(mBufferDataSize > 0)
    {
        if(readable == 0)
            return false;
        /* Without a device clock, record the timestamp of the first frame
         * that will be heard (next-to-buffer pts minus what's queued).
         */
        if(!alcGetInteger64vSOFT)
            mDeviceStartTime = mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
    }
    else
    {
        ALint queued{};
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        if(queued == 0) return false;
    }

    alSourcePlay(mSource);
    if(alcGetInteger64vSOFT)
    {
        /* Subtract the total buffer queue time from the current pts to get the
         * pts of the start of the queue.
         */
        int64_t srctimes[2]{0,0};
        alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
        auto device_time = nanoseconds{srctimes[1]};
        auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
            mCodecCtx->sample_rate;

        /* The mixer may have ticked and incremented the device time and sample
         * offset, so subtract the source offset from the device time to get
         * the device time the source started at. Also subtract startpts to get
         * the device time the stream would have started at to reach where it
         * is now.
         */
        if(mBufferDataSize > 0)
        {
            nanoseconds startpts{mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
        else
        {
            nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
    }
    return true;
}
622 
623 int AudioState::getSync()
624 {
625     if(mMovie.mAVSyncType == SyncMaster::Audio)
626         return 0;
627 
628     auto ref_clock = mMovie.getMasterClock();
629     auto diff = ref_clock - getClockNoLock();
630 
631     if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
632     {
633         /* Difference is TOO big; reset accumulated average */
634         mClockDiffAvg = seconds_d64::zero();
635         return 0;
636     }
637 
638     /* Accumulate the diffs */
639     mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
640     auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
641     if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
642         return 0;
643 
644     /* Constrain the per-update difference to avoid exceedingly large skips */
645     diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
646     return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
647 }
648 
/* Decodes audio until a frame is successfully converted, returning the number
 * of sample frames now in mSamples (0 on end-of-stream or quit). Updates
 * mCurrentPts from the frame's best-effort timestamp when one is available.
 */
int AudioState::decodeFrame()
{
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        int ret;
        /* Feed packets to the decoder until it produces a frame. */
        while((ret=avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get())) == AVERROR(EAGAIN))
            mPackets.sendTo(mCodecCtx.get());
        if(ret != 0)
        {
            if(ret == AVErrorEOF) break;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
            continue;
        }

        if(mDecodedFrame->nb_samples <= 0)
            continue;

        /* If provided, update w/ pts */
        if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
            mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(mDecodedFrame->best_effort_timestamp)});

        /* Grow the conversion buffer if this frame has more samples than any
         * previous one.
         */
        if(mDecodedFrame->nb_samples > mSamplesMax)
        {
            av_freep(&mSamples);
            av_samples_alloc(&mSamples, nullptr, mCodecCtx->channels, mDecodedFrame->nb_samples,
                mDstSampleFmt, 0);
            mSamplesMax = mDecodedFrame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
            const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};

        av_frame_unref(mDecodedFrame.get());
        return data_size;
    }

    return 0;
}
688 
/* Duplicates the sample frame at in to out, count times. The frame size must
 * be a multiple of the template type size.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, size_t count, size_t frame_size)
{
    auto *src = reinterpret_cast<const T*>(in);
    auto *dst = reinterpret_cast<T*>(out);

    /* Fast path: exactly one T per frame, so a plain fill suffices. */
    if(frame_size == sizeof(T))
    {
        std::fill_n(dst, count, *src);
        return;
    }

    /* General path: write the source frame's elements repeatedly. */
    const size_t type_mult{frame_size / sizeof(T)};
    for(size_t frame{0};frame < count;++frame)
    {
        for(size_t j{0};j < type_mult;++j)
            *(dst++) = src[j];
    }
}
714 
715 
/* Fills the given buffer with up to length bytes of converted samples,
 * decoding more frames as needed. A negative mSamplesPos (from a negative
 * sample_skip) duplicates the first decoded frame to pad the stream; if
 * decoding runs dry before the buffer is full, the remainder is filled with
 * silence. Returns false if nothing at all could be written.
 */
bool AudioState::readAudio(uint8_t *samples, unsigned int length, int &sample_skip)
{
    unsigned int audio_size{0};

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(mSamplesLen > 0 && audio_size < length)
    {
        unsigned int rem{length - audio_size};
        if(mSamplesPos >= 0)
        {
            /* Normal case: copy the remaining converted samples. */
            const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
            if(rem > len) rem = len;
            std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize,
                rem*mFrameSize, samples);
        }
        else
        {
            rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));

            /* Add samples by copying the first sample */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;

        /* Decode the next frame(s), applying any pending sample skip. */
        while(mSamplesPos >= mSamplesLen)
        {
            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            // Adjust the device start time and current pts by the amount we're
            // skipping/duplicating, so that the clock remains correct for the
            // current stream position.
            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
            continue;
        }
    }
    if(audio_size <= 0)
        return false;

    /* Pad the rest of the buffer with silence so a full buffer is queued. */
    if(audio_size < length)
    {
        const unsigned int rem{length - audio_size};
        std::fill_n(samples, rem*mFrameSize,
            (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        audio_size += rem;
    }
    return true;
}
784 
/* Decodes and writes as many samples as currently fit into the ring buffer
 * (mBufferData), for the callback-buffer path. One byte is always left free
 * to distinguish a full buffer from an empty one. Handles sample skipping and
 * duplication for sync like the buffer-queue readAudio.
 */
void AudioState::readAudio(int sample_skip)
{
    size_t woffset{mWritePos.load(std::memory_order_acquire)};
    while(mSamplesLen > 0)
    {
        const size_t roffset{mReadPos.load(std::memory_order_relaxed)};

        if(mSamplesPos < 0)
        {
            /* Duplicating samples: compute how many frames fit up to the read
             * position (or buffer end), capped at the duplicate count.
             */
            size_t rem{(((roffset > woffset) ? roffset-1
                : ((roffset == 0) ? mBufferDataSize-1
                : mBufferDataSize)) - woffset) / mFrameSize};
            rem = std::min<size_t>(rem, static_cast<ALuint>(-mSamplesPos));
            if(rem == 0) break;

            auto *splout{&mBufferData[woffset]};
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(splout, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(splout, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(splout, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(splout, mSamples, rem, mFrameSize);
            woffset += rem * mFrameSize;
            if(woffset == mBufferDataSize)
                woffset = 0;
            mWritePos.store(woffset, std::memory_order_release);
            mSamplesPos += static_cast<int>(rem);
            mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
            continue;
        }

        const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
        const size_t nbytes{static_cast<ALuint>(mSamplesLen)*size_t{mFrameSize} -
            boffset};
        if(roffset > woffset)
        {
            /* Writable region is contiguous, up to one byte before roffset. */
            const size_t writable{roffset-woffset-1};
            if(writable < nbytes) break;

            memcpy(&mBufferData[woffset], mSamples+boffset, nbytes);
            woffset += nbytes;
        }
        else
        {
            /* Writable region wraps; copy in up to two spans. */
            const size_t writable{mBufferDataSize+roffset-woffset-1};
            if(writable < nbytes) break;

            const size_t todo1{std::min<size_t>(nbytes, mBufferDataSize-woffset)};
            const size_t todo2{nbytes - todo1};

            memcpy(&mBufferData[woffset], mSamples+boffset, todo1);
            woffset += todo1;
            if(woffset == mBufferDataSize)
            {
                woffset = 0;
                if(todo2 > 0)
                {
                    memcpy(&mBufferData[woffset], mSamples+boffset+todo1, todo2);
                    woffset += todo2;
                }
            }
        }
        mWritePos.store(woffset, std::memory_order_release);
        mCurrentPts += nanoseconds{seconds{mSamplesLen-mSamplesPos}} / mCodecCtx->sample_rate;

        /* Decode the next frame(s), applying any pending sample skip and
         * adjusting the clock for skipped/duplicated samples.
         */
        do {
            mSamplesLen = decodeFrame();
            if(mSamplesLen <= 0) break;

            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
        } while(mSamplesPos >= mSamplesLen);
    }
}
865 
866 
867 #ifdef AL_SOFT_events
/* Handles OpenAL events: wakes the audio thread on buffer completion, logs
 * other events, and on device disconnect clears mConnected and wakes the
 * audio thread so it can react.
 */
void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
    ALsizei length, const ALchar *message, void *userParam)
{
    auto self = static_cast<AudioState*>(userParam);

    if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
    {
        /* Temporarily lock the source mutex to ensure it's not between
         * checking the processed count and going to sleep.
         */
        std::unique_lock<std::mutex>{self->mSrcMutex}.unlock();
        self->mSrcCond.notify_one();
        return;
    }

    std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
    switch(eventType)
    {
    case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
    case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
    case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
    default:
        std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<std::dec<<
            std::setw(0)<<std::setfill(' '); break;
    }
    std::cout<< "\n"
        "Object ID: "<<object<<"\n"
        "Parameter: "<<param<<"\n"
        "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
        std::endl;

    if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
    {
        {
            std::lock_guard<std::mutex> lock{self->mSrcMutex};
            self->mConnected.clear(std::memory_order_release);
        }
        self->mSrcCond.notify_one();
    }
}
908 #endif
909 
910 #ifdef AL_SOFT_callback_buffer
911 ALsizei AudioState::bufferCallback(void *data, ALsizei size)
912 {
913     ALsizei got{0};
914 
915     size_t roffset{mReadPos.load(std::memory_order_acquire)};
916     while(got < size)
917     {
918         const size_t woffset{mWritePos.load(std::memory_order_relaxed)};
919         if(woffset == roffset) break;
920 
921         size_t todo{((woffset < roffset) ? mBufferDataSize : woffset) - roffset};
922         todo = std::min<size_t>(todo, static_cast<ALuint>(size-got));
923 
924         memcpy(data, &mBufferData[roffset], todo);
925         data = static_cast<ALbyte*>(data) + todo;
926         got += static_cast<ALsizei>(todo);
927 
928         roffset += todo;
929         if(roffset == mBufferDataSize)
930             roffset = 0;
931     }
932     mReadPos.store(roffset, std::memory_order_release);
933 
934     return got;
935 }
936 #endif
937 
/* The audio thread's entry point. Picks an OpenAL sample format matching the
 * decoded audio, sets up a source with either a callback-driven buffer
 * (AL_SOFT_callback_buffer) or a queue of streaming buffers, then keeps the
 * device fed with decoded samples until the movie quits or the device is
 * disconnected. The return value is always 0.
 */
int AudioState::handler()
{
    std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
    milliseconds sleep_time{AudioBufferTime / 3};
    ALenum fmt;

#ifdef AL_SOFT_events
    /* With the events extension, buffer-completed events wake this thread
     * instead of it polling, so the fallback sleep can be much longer.
     */
    const std::array<ALenum,3> evt_types{{
        AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
        AL_EVENT_TYPE_DISCONNECTED_SOFT}};
    if(alEventControlSOFT)
    {
        alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
        alEventCallbackSOFT(EventCallback, this);
        sleep_time = AudioBufferTotalTime;
    }
#endif
#ifdef AL_SOFT_bformat_ex
    const bool has_bfmt_ex{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE};
    ALenum ambi_layout{AL_FUMA_SOFT};
    ALenum ambi_scale{AL_FUMA_SOFT};
#endif

    /* Find a suitable format for OpenAL. For each destination sample type
     * (float32 with AL_EXT_FLOAT32, unsigned 8-bit, or 16-bit as the
     * fallback), try 7.1 and 5.1 via AL_EXT_MCFORMATS, then mono, then
     * B-Format via AL_EXT_BFORMAT, finally falling back to stereo. mFrameSize
     * starts as the bytes-per-sample and is multiplied by the channel count.
     */
    mDstChanLayout = 0;
    mFormat = AL_NONE;
    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
       alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
        mFrameSize = 4;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO_FLOAT32;
        }
        /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
         * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
         * have no way to specify if the source is actually B-Format (let alone
         * if it's 2D or 3D).
         */
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE && fmt != -1)
        {
            /* Accept (order+1)^2 channels (full 3D) or (order+1)^2 + 2
             * (3D plus a stereo mix), for any ambisonic order.
             */
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
                 * is 4 channels for 3D buffers.
                 */
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO_FLOAT32;
        }
    }
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_U8;
        mFrameSize = 1;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO8;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE && fmt != -1)
        {
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO8;
        }
    }
    if(!mFormat)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_S16;
        mFrameSize = 2;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO16;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE && fmt != -1)
        {
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO16;
        }
    }
    /* Transfer buffer for the non-callback (buffer queue) path; stays null
     * when the callback buffer is used (buffer_len stays 0 then).
     */
    void *samples{nullptr};
    ALsizei buffer_len{0};

    mSamples = nullptr;
    mSamplesMax = 0;
    mSamplesPos = 0;
    mSamplesLen = 0;

    mDecodedFrame.reset(av_frame_alloc());
    if(!mDecodedFrame)
    {
        std::cerr<< "Failed to allocate audio frame" <<std::endl;
        goto finish;
    }

    if(!mDstChanLayout)
    {
        /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
         * we have to drop any extra channels.
         */
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
            (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));

        /* Note that ffmpeg/libavcodec has no method to check the ambisonic
         * channel order and normalization, so we can only assume AmbiX as the
         * defacto-standard. This is not true for .amb files, which use FuMa.
         */
        std::vector<double> mtx(64*64, 0.0);
#ifdef AL_SOFT_bformat_ex
        ambi_layout = AL_ACN_SOFT;
        ambi_scale = AL_SN3D_SOFT;
        if(has_bfmt_ex)
        {
            /* An identity matrix that doesn't remix any channels. */
            std::cout<< "Found AL_SOFT_bformat_ex" <<std::endl;
            mtx[0 + 0*64] = 1.0;
            mtx[1 + 1*64] = 1.0;
            mtx[2 + 2*64] = 1.0;
            mtx[3 + 3*64] = 1.0;
        }
        else
#endif
        {
            std::cout<< "Found AL_EXT_BFORMAT" <<std::endl;
            /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
             * ordering and normalization, so a custom matrix is needed to
             * scale and reorder the source from AmbiX.
             */
            mtx[0 + 0*64] = std::sqrt(0.5);
            mtx[3 + 1*64] = 1.0;
            mtx[1 + 2*64] = 1.0;
            mtx[2 + 3*64] = 1.0;
        }
        swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
    }
    else
        /* Normal channel layout: convert to the chosen destination layout,
         * using ffmpeg's default layout when the source layout is unset.
         */
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate,
            mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout)
                : av_get_default_channel_layout(mCodecCtx->channels),
            mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));
    if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
    {
        std::cerr<< "Failed to initialize audio converter" <<std::endl;
        goto finish;
    }

    /* Generate the source and buffers, and set initial source parameters. */
    alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
    alGenSources(1, &mSource);

    if(DirectOutMode)
        alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, DirectOutMode);
    if(EnableWideStereo)
    {
        /* Widen the stereo image to +/-60 degrees. */
        const float angles[2]{static_cast<float>(M_PI / 3.0), static_cast<float>(-M_PI / 3.0)};
        alSourcefv(mSource, AL_STEREO_ANGLES, angles);
    }
#ifdef AL_SOFT_bformat_ex
    if(has_bfmt_ex)
    {
        for(ALuint bufid : mBuffers)
        {
            alBufferi(bufid, AL_AMBISONIC_LAYOUT_SOFT, ambi_layout);
            alBufferi(bufid, AL_AMBISONIC_SCALING_SOFT, ambi_scale);
        }
    }
#endif

    if(alGetError() != AL_NO_ERROR)
        goto finish;

#ifdef AL_SOFT_callback_buffer
    if(alBufferCallbackSOFT)
    {
        /* With the callback extension, a single callback-fed buffer replaces
         * the streaming queue; the mixer pulls samples via bufferCallback.
         */
        alBufferCallbackSOFT(mBuffers[0], mFormat, mCodecCtx->sample_rate, bufferCallbackC, this,
            0);
        alSourcei(mSource, AL_BUFFER, static_cast<ALint>(mBuffers[0]));
        if(alGetError() != AL_NO_ERROR)
        {
            /* Callback setup failed; fall back to the buffer queue path. */
            fprintf(stderr, "Failed to set buffer callback\n");
            alSourcei(mSource, AL_BUFFER, 0);
            buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
                AudioBufferTime).count() * mFrameSize);
        }
        else
        {
            mBufferDataSize = static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
                AudioBufferTotalTime).count()) * mFrameSize;
            mBufferData.reset(new uint8_t[mBufferDataSize]);
            mReadPos.store(0, std::memory_order_relaxed);
            mWritePos.store(0, std::memory_order_relaxed);

            /* Refill the ring buffer at the device's refresh rate.
             * NOTE(review): if the ALC_REFRESH query fails, refresh stays 0
             * and this divides by zero — assumes a valid current context.
             */
            ALCint refresh{};
            alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH, 1, &refresh);
            sleep_time = milliseconds{seconds{1}} / refresh;
        }
    }
    else
#endif
        buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
            AudioBufferTime).count() * mFrameSize);
    if(buffer_len > 0)
        samples = av_malloc(static_cast<ALuint>(buffer_len));

    /* Prefill the codec buffer. */
    do {
        const int ret{mPackets.sendTo(mCodecCtx.get())};
        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
            break;
    } while(1);

    srclock.lock();
    /* Anchor the device clock to the stream's current PTS so getClock() can
     * derive playback time from the device clock.
     */
    if(alcGetInteger64vSOFT)
    {
        int64_t devtime{};
        alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
            1, &devtime);
        mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
    }

    /* Decode the first chunk and skip samples as needed to stay in sync. */
    mSamplesLen = decodeFrame();
    if(mSamplesLen > 0)
    {
        mSamplesPos = std::min(mSamplesLen, getSync());

        auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
        mDeviceStartTime -= skip;
        mCurrentPts += skip;
    }

    /* Main loop: runs until quit is flagged or the disconnect event clears
     * mConnected (see EventCallback).
     */
    while(!mMovie.mQuit.load(std::memory_order_relaxed)
        && mConnected.test_and_set(std::memory_order_relaxed))
    {
        ALenum state;
        if(mBufferDataSize > 0)
        {
            /* Callback path: just keep the ring buffer topped up. */
            alGetSourcei(mSource, AL_SOURCE_STATE, &state);
            readAudio(getSync());
        }
        else
        {
            ALint processed, queued;

            /* First remove any processed buffers. */
            alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
            while(processed > 0)
            {
                ALuint bid;
                alSourceUnqueueBuffers(mSource, 1, &bid);
                --processed;
            }

            /* Refill the buffer queue. */
            int sync_skip{getSync()};
            alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
            while(static_cast<ALuint>(queued) < mBuffers.size())
            {
                /* Read the next chunk of data, filling the buffer, and queue
                 * it on the source.
                 */
                const bool got_audio{readAudio(static_cast<uint8_t*>(samples),
                    static_cast<ALuint>(buffer_len), sync_skip)};
                if(!got_audio) break;

                const ALuint bufid{mBuffers[mBufferIdx]};
                mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());

                alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
                alSourceQueueBuffers(mSource, 1, &bufid);
                ++queued;
            }

            /* Check that the source is playing. */
            alGetSourcei(mSource, AL_SOURCE_STATE, &state);
            if(state == AL_STOPPED)
            {
                /* AL_STOPPED means there was an underrun. Clear the buffer
                 * queue since this likely means we're late, and rewind the
                 * source to get it back into an AL_INITIAL state.
                 */
                alSourceRewind(mSource);
                alSourcei(mSource, AL_BUFFER, 0);
                if(alcGetInteger64vSOFT)
                {
                    /* Also update the device start time with the current
                     * device clock, so the decoder knows we're running behind.
                     */
                    int64_t devtime{};
                    alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
                        ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
                    mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
                }
                continue;
            }
        }

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
        {
            if(!startPlayback())
                break;
        }
        /* NOTE(review): returns `false` from an int-returning function, i.e.
         * 0 — the same value as the normal exit, but without the cleanup
         * below (the samples buffer is still freed at `finish`... it is not;
         * this early return skips `finish`). Verify intent.
         */
        if(alGetError() != AL_NO_ERROR)
            return false;

        mSrcCond.wait_for(srclock, sleep_time);
    }

    /* Stop playback and detach any buffers before cleaning up. */
    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);
    srclock.unlock();

finish:
    av_freep(&samples);

#ifdef AL_SOFT_events
    if(alEventControlSOFT)
    {
        alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
        alEventCallbackSOFT(nullptr, nullptr);
    }
#endif

    return 0;
}
1359 
1360 
1361 nanoseconds VideoState::getClock()
1362 {
1363     /* NOTE: This returns incorrect times while not playing. */
1364     std::lock_guard<std::mutex> _{mDispPtsMutex};
1365     if(mDisplayPtsTime == microseconds::min())
1366         return nanoseconds::zero();
1367     auto delta = get_avtime() - mDisplayPtsTime;
1368     return mDisplayPts + delta;
1369 }
1370 
1371 /* Called by VideoState::updateVideo to display the next video frame. */
1372 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
1373 {
1374     if(!mImage)
1375         return;
1376 
1377     double aspect_ratio;
1378     int win_w, win_h;
1379     int w, h, x, y;
1380 
1381     if(mCodecCtx->sample_aspect_ratio.num == 0)
1382         aspect_ratio = 0.0;
1383     else
1384     {
1385         aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
1386                        mCodecCtx->height;
1387     }
1388     if(aspect_ratio <= 0.0)
1389         aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height;
1390 
1391     SDL_GetWindowSize(screen, &win_w, &win_h);
1392     h = win_h;
1393     w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
1394     if(w > win_w)
1395     {
1396         w = win_w;
1397         h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
1398     }
1399     x = (win_w - w) / 2;
1400     y = (win_h - h) / 2;
1401 
1402     SDL_Rect src_rect{ 0, 0, mWidth, mHeight };
1403     SDL_Rect dst_rect{ x, y, w, h };
1404     SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
1405     SDL_RenderPresent(renderer);
1406 }
1407 
1408 /* Called regularly on the main thread where the SDL_Renderer was created. It
1409  * handles updating the textures of decoded frames and displaying the latest
1410  * frame.
1411  */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    /* Advance through the picture queue to the most recent frame whose PTS
     * has been reached, dropping any older ones.
     */
    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break;
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts)
            break;

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Quitting: release the queue slot(s) and wake the decoder thread so
         * it can finish up. Locking and unlocking the mutex ensures the
         * decoder isn't between its condition check and wait.
         */
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    if(updated)
    {
        /* A new frame will be shown; free up the consumed queue entries and
         * wake the decoder thread waiting for space.
         */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            /* The texture covers the coded (padded) size; only the
             * mWidth x mHeight region is displayed.
             */
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                mCodecCtx->coded_width, mCodecCtx->coded_height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = mCodecCtx->width;
            mHeight = mCodecCtx->height;

            if(mFirstUpdate && mWidth > 0 && mHeight > 0)
            {
                /* For the first update, set the window size to the video size. */
                mFirstUpdate = false;

                int w{mWidth};
                int h{mHeight};
                if(mCodecCtx->sample_aspect_ratio.den != 0)
                {
                    /* Stretch the window to match the display aspect ratio. */
                    double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                    if(aspect_ratio >= 1.0)
                        w = static_cast<int>(w*aspect_ratio + 0.5);
                    else if(aspect_ratio > 0.0)
                        h = static_cast<int>(h/aspect_ratio + 0.5);
                }
                SDL_SetWindowSize(screen, w, h);
            }
        }

        if(mImage)
        {
            AVFrame *frame{vp->mFrame.get()};
            void *pixels{nullptr};
            int pitch{0};

            /* YUV420P frames can be uploaded directly; anything else goes
             * through swscale into the locked texture memory.
             */
            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int coded_w{mCodecCtx->coded_width};
                int coded_h{mCodecCtx->coded_height};
                int w{mCodecCtx->width};
                int h{mCodecCtx->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
                /* NOTE(review): the plane offsets use coded_w while the
                 * linesizes use the texture pitch — assumes pitch == coded_w
                 * for IYUV textures; verify against SDL's texture layout.
                 */
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                pict_data[1] = pict_data[0] + coded_w*coded_h;
                pict_data[2] = pict_data[1] + coded_w*coded_h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;

                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }
        }

        redraw = true;
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer);
    }

    if(updated)
    {
        /* Record when this frame's PTS was shown, for getClock()'s
         * extrapolation.
         */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        /* At end-of-stream with an empty queue, flag the final update so the
         * decoder thread can exit.
         */
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
1555 
/* The video thread's entry point. Decodes frames, timestamps them, and pushes
 * them into the picture queue consumed by updateVideo() on the main thread.
 * Always returns 0.
 */
int VideoState::handler()
{
    /* Allocate a frame for each picture queue slot up front. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    do {
        const int ret{mPackets.sendTo(mCodecCtx.get())};
        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
            break;
    } while(1);

    {
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        int ret;
        /* Feed more packets until the decoder produces a frame. */
        while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN))
            mPackets.sendTo(mCodecCtx.get());
        if(ret != 0)
        {
            if(ret == AVErrorEOF) break;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. Frames without a usable timestamp keep
         * the running estimate from the previous frame.
         */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        /* Send a packet now so it's hopefully ready by the time it's needed. */
        mPackets.sendTo(mCodecCtx.get());

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire) &&
                !mMovie.mQuit.load(std::memory_order_relaxed))
                mPictQCond.wait(lock);
        }
    }
    mEOS = true;

    /* Wait for the main thread to display any remaining frames and signal
     * the final update before exiting.
     */
    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1628 
1629 
1630 int MovieState::decode_interrupt_cb(void *ctx)
1631 {
1632     return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1633 }
1634 
1635 bool MovieState::prepare()
1636 {
1637     AVIOContext *avioctx{nullptr};
1638     AVIOInterruptCB intcb{decode_interrupt_cb, this};
1639     if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1640     {
1641         std::cerr<< "Failed to open "<<mFilename <<std::endl;
1642         return false;
1643     }
1644     mIOContext.reset(avioctx);
1645 
1646     /* Open movie file. If avformat_open_input fails it will automatically free
1647      * this context, so don't set it onto a smart pointer yet.
1648      */
1649     AVFormatContext *fmtctx{avformat_alloc_context()};
1650     fmtctx->pb = mIOContext.get();
1651     fmtctx->interrupt_callback = intcb;
1652     if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1653     {
1654         std::cerr<< "Failed to open "<<mFilename <<std::endl;
1655         return false;
1656     }
1657     mFormatCtx.reset(fmtctx);
1658 
1659     /* Retrieve stream information */
1660     if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1661     {
1662         std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1663         return false;
1664     }
1665 
1666     /* Dump information about file onto standard error */
1667     av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1668 
1669     mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
1670     return true;
1671 }
1672 
1673 void MovieState::setTitle(SDL_Window *window)
1674 {
1675     auto pos1 = mFilename.rfind('/');
1676     auto pos2 = mFilename.rfind('\\');
1677     auto fpos = ((pos1 == std::string::npos) ? pos2 :
1678                  (pos2 == std::string::npos) ? pos1 :
1679                  std::max(pos1, pos2)) + 1;
1680     SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1681 }
1682 
1683 nanoseconds MovieState::getClock()
1684 {
1685     if(mClockBase == microseconds::min())
1686         return nanoseconds::zero();
1687     return get_avtime() - mClockBase;
1688 }
1689 
1690 nanoseconds MovieState::getMasterClock()
1691 {
1692     if(mAVSyncType == SyncMaster::Video)
1693         return mVideo.getClock();
1694     if(mAVSyncType == SyncMaster::Audio)
1695         return mAudio.getClock();
1696     return getClock();
1697 }
1698 
1699 nanoseconds MovieState::getDuration()
1700 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1701 
1702 int MovieState::streamComponentOpen(unsigned int stream_index)
1703 {
1704     if(stream_index >= mFormatCtx->nb_streams)
1705         return -1;
1706 
1707     /* Get a pointer to the codec context for the stream, and open the
1708      * associated codec.
1709      */
1710     AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
1711     if(!avctx) return -1;
1712 
1713     if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1714         return -1;
1715 
1716     AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
1717     if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1718     {
1719         std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1720             << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1721         return -1;
1722     }
1723 
1724     /* Initialize and start the media type handler */
1725     switch(avctx->codec_type)
1726     {
1727         case AVMEDIA_TYPE_AUDIO:
1728             mAudio.mStream = mFormatCtx->streams[stream_index];
1729             mAudio.mCodecCtx = std::move(avctx);
1730             break;
1731 
1732         case AVMEDIA_TYPE_VIDEO:
1733             mVideo.mStream = mFormatCtx->streams[stream_index];
1734             mVideo.mCodecCtx = std::move(avctx);
1735             break;
1736 
1737         default:
1738             return -1;
1739     }
1740 
1741     return static_cast<int>(stream_index);
1742 }
1743 
/* Demuxer thread entry point: selects the streams to play, starts the
 * audio/video decoder threads, then reads packets from the file and
 * dispatches them to the per-stream queues until EOF or quit. On exit it
 * shuts the queues, joins the decoder threads, waits for the final video
 * update, and posts FF_MOVIE_DONE_EVENT to the main loop.
 */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mPackets;
    auto &video_queue = mVideo.mPackets;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        /* Flag quit but keep going: the shutdown path below still runs so
         * the main loop gets its FF_MOVIE_DONE_EVENT.
         */
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    while(!mQuit.load(std::memory_order_relaxed))
    {
        AVPacket packet;
        if(av_read_frame(mFormatCtx.get(), &packet) < 0)
            break;

        /* Copy the packet into the queue it's meant for. If the queue is
         * full, sleep and retry until there's room (or we're told to quit).
         */
        if(packet.stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet.stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }

        /* Drop our reference; the queue holds its own copy of the packet
         * data (presumably via put() — see the queue implementation).
         */
        av_packet_unref(&packet);
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    if(mVideo.mCodecCtx) video_queue.setFinished();
    if(mAudio.mCodecCtx) audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Wait for the video display to acknowledge end-of-stream before
     * signaling completion, so the last frame gets shown.
     */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    /* Tell the main loop this movie is done. */
    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
1819 
1820 
1821 // Helper class+method to print the time with human-readable formatting.
1822 struct PrettyTime {
1823     seconds mTime;
1824 };
1825 std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
1826 {
1827     using hours = std::chrono::hours;
1828     using minutes = std::chrono::minutes;
1829 
1830     seconds t{rhs.mTime};
1831     if(t.count() < 0)
1832     {
1833         os << '-';
1834         t *= -1;
1835     }
1836 
1837     // Only handle up to hour formatting
1838     if(t >= hours{1})
1839         os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
1840            << (duration_cast<minutes>(t).count() % 60) << 'm';
1841     else
1842         os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
1843     os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
1844        << std::setfill(' ');
1845     return os;
1846 }
1847 
1848 } // namespace
1849 
1850 
1851 int main(int argc, char *argv[])
1852 {
1853     std::unique_ptr<MovieState> movState;
1854 
1855     if(argc < 2)
1856     {
1857         std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1858         return 1;
1859     }
1860     /* Register all formats and codecs */
1861 #if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
1862     av_register_all();
1863 #endif
1864     /* Initialize networking protocols */
1865     avformat_network_init();
1866 
1867     if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
1868     {
1869         std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1870         return 1;
1871     }
1872 
1873     /* Make a window to put our video */
1874     SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
1875     if(!screen)
1876     {
1877         std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1878         return 1;
1879     }
1880     /* Make a renderer to handle the texture image surface and rendering. */
1881     Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
1882     SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
1883     if(renderer)
1884     {
1885         SDL_RendererInfo rinf{};
1886         bool ok{false};
1887 
1888         /* Make sure the renderer supports IYUV textures. If not, fallback to a
1889          * software renderer. */
1890         if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1891         {
1892             for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
1893                 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1894         }
1895         if(!ok)
1896         {
1897             std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1898             SDL_DestroyRenderer(renderer);
1899             renderer = nullptr;
1900         }
1901     }
1902     if(!renderer)
1903     {
1904         render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1905         renderer = SDL_CreateRenderer(screen, -1, render_flags);
1906     }
1907     if(!renderer)
1908     {
1909         std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1910         return 1;
1911     }
1912     SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1913     SDL_RenderFillRect(renderer, nullptr);
1914     SDL_RenderPresent(renderer);
1915 
1916     /* Open an audio device */
1917     ++argv; --argc;
1918     if(InitAL(&argv, &argc))
1919     {
1920         std::cerr<< "Failed to set up audio device" <<std::endl;
1921         return 1;
1922     }
1923 
1924     {
1925         auto device = alcGetContextsDevice(alcGetCurrentContext());
1926         if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1927         {
1928             std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1929             alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1930                 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1931             );
1932         }
1933     }
1934 
1935     if(alIsExtensionPresent("AL_SOFT_source_latency"))
1936     {
1937         std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1938         alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1939             alGetProcAddress("alGetSourcei64vSOFT")
1940         );
1941     }
1942 #ifdef AL_SOFT_events
1943     if(alIsExtensionPresent("AL_SOFT_events"))
1944     {
1945         std::cout<< "Found AL_SOFT_events" <<std::endl;
1946         alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1947             alGetProcAddress("alEventControlSOFT"));
1948         alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
1949             alGetProcAddress("alEventCallbackSOFT"));
1950     }
1951 #endif
1952 #ifdef AL_SOFT_callback_buffer
1953     if(alIsExtensionPresent("AL_SOFTX_callback_buffer"))
1954     {
1955         std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
1956         alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
1957             alGetProcAddress("alBufferCallbackSOFT"));
1958     }
1959 #endif
1960 
1961     int fileidx{0};
1962     for(;fileidx < argc;++fileidx)
1963     {
1964         if(strcmp(argv[fileidx], "-direct") == 0)
1965         {
1966             if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
1967             {
1968                 std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
1969                 DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
1970             }
1971             else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
1972             {
1973                 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1974                 DirectOutMode = AL_DROP_UNMATCHED_SOFT;
1975             }
1976             else
1977                 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1978         }
1979         else if(strcmp(argv[fileidx], "-wide") == 0)
1980         {
1981             if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
1982                 std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
1983             else
1984             {
1985                 std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
1986                 EnableWideStereo = true;
1987             }
1988         }
1989         else if(strcmp(argv[fileidx], "-novideo") == 0)
1990             DisableVideo = true;
1991         else
1992             break;
1993     }
1994 
1995     while(fileidx < argc && !movState)
1996     {
1997         movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
1998         if(!movState->prepare()) movState = nullptr;
1999     }
2000     if(!movState)
2001     {
2002         std::cerr<< "Could not start a video" <<std::endl;
2003         return 1;
2004     }
2005     movState->setTitle(screen);
2006 
2007     /* Default to going to the next movie at the end of one. */
2008     enum class EomAction {
2009         Next, Quit
2010     } eom_action{EomAction::Next};
2011     seconds last_time{seconds::min()};
2012     while(1)
2013     {
2014         SDL_Event event{};
2015         int have_evt{SDL_WaitEventTimeout(&event, 10)};
2016 
2017         auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
2018         if(cur_time != last_time)
2019         {
2020             auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
2021             std::cout<< "    \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
2022             last_time = cur_time;
2023         }
2024 
2025         bool force_redraw{false};
2026         if(have_evt) do {
2027             switch(event.type)
2028             {
2029             case SDL_KEYDOWN:
2030                 switch(event.key.keysym.sym)
2031                 {
2032                 case SDLK_ESCAPE:
2033                     movState->mQuit = true;
2034                     eom_action = EomAction::Quit;
2035                     break;
2036 
2037                 case SDLK_n:
2038                     movState->mQuit = true;
2039                     eom_action = EomAction::Next;
2040                     break;
2041 
2042                 default:
2043                     break;
2044                 }
2045                 break;
2046 
2047             case SDL_WINDOWEVENT:
2048                 switch(event.window.event)
2049                 {
2050                 case SDL_WINDOWEVENT_RESIZED:
2051                     SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
2052                     SDL_RenderFillRect(renderer, nullptr);
2053                     force_redraw = true;
2054                     break;
2055 
2056                 case SDL_WINDOWEVENT_EXPOSED:
2057                     force_redraw = true;
2058                     break;
2059 
2060                 default:
2061                     break;
2062                 }
2063                 break;
2064 
2065             case SDL_QUIT:
2066                 movState->mQuit = true;
2067                 eom_action = EomAction::Quit;
2068                 break;
2069 
2070             case FF_MOVIE_DONE_EVENT:
2071                 std::cout<<'\n';
2072                 last_time = seconds::min();
2073                 if(eom_action != EomAction::Quit)
2074                 {
2075                     movState = nullptr;
2076                     while(fileidx < argc && !movState)
2077                     {
2078                         movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
2079                         if(!movState->prepare()) movState = nullptr;
2080                     }
2081                     if(movState)
2082                     {
2083                         movState->setTitle(screen);
2084                         break;
2085                     }
2086                 }
2087 
2088                 /* Nothing more to play. Shut everything down and quit. */
2089                 movState = nullptr;
2090 
2091                 CloseAL();
2092 
2093                 SDL_DestroyRenderer(renderer);
2094                 renderer = nullptr;
2095                 SDL_DestroyWindow(screen);
2096                 screen = nullptr;
2097 
2098                 SDL_Quit();
2099                 exit(0);
2100 
2101             default:
2102                 break;
2103             }
2104         } while(SDL_PollEvent(&event));
2105 
2106         movState->mVideo.updateVideo(screen, renderer, force_redraw);
2107     }
2108 
2109     std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
2110     return 1;
2111 }
2112