#include "videostate.hpp"

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <thread>
#include <chrono>

#include <osg/Texture2D>

#if defined(_MSC_VER)
    #pragma warning (push)
    #pragma warning (disable : 4244)
#endif

extern "C"
{
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    #include <libavutil/time.h>
    #include <libavutil/imgutils.h> // av_image_alloc
}

#if defined(_MSC_VER)
    #pragma warning (pop)
#endif

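// FFmpeg-style flush sentinel: a packet whose data points at the static "FLUSH"
// string. It is pushed into the packet queues after a seek so the decoder
// threads know to flush their codec buffers (see ParseThread::run below).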
static const char* flushString = "FLUSH";
struct FlushPacket : AVPacket
{
    FlushPacket()
        : AVPacket()
    {
        data = ((uint8_t*)flushString);
    }
};

static FlushPacket flush_pkt;

#include "videoplayer.hpp"
#include "audiodecoder.hpp"
#include "audiofactory.hpp"

namespace
{
    const int MAX_AUDIOQ_SIZE = (5 * 16 * 1024);
    const int MAX_VIDEOQ_SIZE = (5 * 256 * 1024);

    struct AVPacketUnref
    {
        void operator()(AVPacket* packet) const
        {
            av_packet_unref(packet);
        }
    };

    struct AVFrameFree
    {
        void operator()(AVFrame* frame) const
        {
            av_frame_free(&frame);
        }
    };

    template<class T>
    struct AVFree
    {
        void operator()(T* ptr) const
        {
            av_freep(&ptr);
        }
    };
}

namespace Video
{

VideoState::VideoState()
    : mAudioFactory(nullptr)
    , format_ctx(nullptr)
    , video_ctx(nullptr)
    , audio_ctx(nullptr)
    , av_sync_type(AV_SYNC_DEFAULT)
    , audio_st(nullptr)
    , video_st(nullptr), frame_last_pts(0.0)
    , video_clock(0.0), sws_context(nullptr)
    , sws_context_w(0), sws_context_h(0)
    , pictq_size(0), pictq_rindex(0), pictq_windex(0)
    , mSeekRequested(false)
    , mSeekPos(0)
    , mVideoEnded(false)
    , mPaused(false)
    , mQuit(false)
{
    mFlushPktData = flush_pkt.data;

// This is no longer needed as of FFmpeg 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
    av_register_all();
#endif
}

VideoState::~VideoState()
{
    deinit();
}

void VideoState::setAudioFactory(MovieAudioFactory *factory)
{
    mAudioFactory = factory;
}


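// Append a packet to the queue, taking ownership of its contents. The flush
// sentinel is copied as-is; regular packets are moved with av_packet_move_ref.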
void PacketQueue::put(AVPacket *pkt)
{
    std::unique_ptr<AVPacketList, AVFree<AVPacketList>> pkt1(static_cast<AVPacketList*>(av_malloc(sizeof(AVPacketList))));
    if(!pkt1) throw std::bad_alloc();

    if(pkt == &flush_pkt)
        pkt1->pkt = *pkt;
    else
        av_packet_move_ref(&pkt1->pkt, pkt);

    pkt1->next = nullptr;

    std::lock_guard<std::mutex> lock(this->mutex);
    AVPacketList* ptr = pkt1.release();
    if(!last_pkt)
        this->first_pkt = ptr;
    else
        this->last_pkt->next = ptr;
    this->last_pkt = ptr;
    this->nb_packets++;
    this->size += ptr->pkt.size;
    this->cond.notify_one();
}

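// Pop the next packet into *pkt, blocking until one is available. Returns 1 on
// success, or -1 once the owning VideoState is quitting or the queue has been
// flushed and drained.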
int PacketQueue::get(AVPacket *pkt, VideoState *is)
{
    std::unique_lock<std::mutex> lock(this->mutex);
    while(!is->mQuit)
    {
        AVPacketList *pkt1 = this->first_pkt;
        if(pkt1)
        {
            this->first_pkt = pkt1->next;
            if(!this->first_pkt)
                this->last_pkt = nullptr;
            this->nb_packets--;
            this->size -= pkt1->pkt.size;

            av_packet_unref(pkt);
            av_packet_move_ref(pkt, &pkt1->pkt);
            av_free(pkt1);

            return 1;
        }

        if(this->flushing)
            break;
        this->cond.wait(lock);
    }

    return -1;
}

void PacketQueue::flush()
{
    this->flushing = true;
    this->cond.notify_one();
}

void PacketQueue::clear()
{
    AVPacketList *pkt, *pkt1;

    std::lock_guard<std::mutex> lock(this->mutex);
    for(pkt = this->first_pkt; pkt != nullptr; pkt = pkt1)
    {
        pkt1 = pkt->next;
        if (pkt->pkt.data != flush_pkt.data)
            av_packet_unref(&pkt->pkt);
        av_freep(&pkt);
    }
    this->last_pkt = nullptr;
    this->first_pkt = nullptr;
    this->nb_packets = 0;
    this->size = 0;
}

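// (Re)allocate the RGBA frame used as the sws_scale target. Returns 0 if the
// existing frame already matches the requested dimensions or allocation
// succeeds, -1 on allocation failure.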
int VideoPicture::set_dimensions(int w, int h)
{
    if (this->rgbaFrame != nullptr && this->rgbaFrame->width == w &&
        this->rgbaFrame->height == h)
    {
        return 0;
    }

    std::unique_ptr<AVFrame, VideoPicture::AVFrameDeleter> frame{av_frame_alloc()};
    if (frame == nullptr)
    {
        std::cerr << "av_frame_alloc failed" << std::endl;
        return -1;
    }

    constexpr AVPixelFormat kPixFmt = AV_PIX_FMT_RGBA;
    frame->format = kPixFmt;
    frame->width = w;
    frame->height = h;
    if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height,
                       kPixFmt, 1) < 0)
    {
        std::cerr << "av_image_alloc failed" << std::endl;
        return -1;
    }

    this->rgbaFrame = std::move(frame);
    return 0;
}

void VideoPicture::AVFrameDeleter::operator()(AVFrame* frame) const
{
    av_freep(frame->data);
    av_frame_free(&frame);
}

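// Custom AVIO callbacks so FFmpeg can demux directly from a std::istream.
// user_data is the owning VideoState (see the avio_alloc_context call in init()).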
int VideoState::istream_read(void *user_data, uint8_t *buf, int buf_size)
{
    try
    {
        std::istream& stream = *static_cast<VideoState*>(user_data)->stream;
        stream.clear();
        stream.read((char*)buf, buf_size);
        return stream.gcount();
    }
    catch (std::exception& )
    {
        return 0;
    }
}

int VideoState::istream_write(void *, uint8_t *, int)
{
    throw std::runtime_error("can't write to read-only stream");
}

int64_t VideoState::istream_seek(void *user_data, int64_t offset, int whence)
{
    std::istream& stream = *static_cast<VideoState*>(user_data)->stream;

    whence &= ~AVSEEK_FORCE;

    stream.clear();

    if(whence == AVSEEK_SIZE)
    {
        size_t prev = stream.tellg();
        stream.seekg(0, std::ios_base::end);
        size_t size = stream.tellg();
        stream.seekg(prev, std::ios_base::beg);
        return size;
    }

    if(whence == SEEK_SET)
        stream.seekg(offset, std::ios_base::beg);
    else if(whence == SEEK_CUR)
        stream.seekg(offset, std::ios_base::cur);
    else if(whence == SEEK_END)
        stream.seekg(offset, std::ios_base::end);
    else
        return -1;

    return stream.tellg();
}

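// Upload the converted RGBA picture into the osg::Texture2D used for display,
// creating the texture lazily on first use.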
void VideoState::video_display(VideoPicture *vp)
{
    if(this->video_ctx->width != 0 && this->video_ctx->height != 0)
    {
        if (!mTexture.get())
        {
            mTexture = new osg::Texture2D;
            mTexture->setDataVariance(osg::Object::DYNAMIC);
            mTexture->setResizeNonPowerOfTwoHint(false);
            mTexture->setWrap(osg::Texture::WRAP_S, osg::Texture::REPEAT);
            mTexture->setWrap(osg::Texture::WRAP_T, osg::Texture::REPEAT);
        }

        osg::ref_ptr<osg::Image> image = new osg::Image;

        image->setImage(this->video_ctx->width, this->video_ctx->height,
                        1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, vp->rgbaFrame->data[0], osg::Image::NO_DELETE);

        mTexture->setImage(image);
    }
}

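// Called from update(): display the next queued picture. When syncing to the
// audio/external clock, skip pictures whose presentation time has already
// passed and show the first one that is due.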
void VideoState::video_refresh()
{
    std::lock_guard<std::mutex> lock(this->pictq_mutex);
    if(this->pictq_size == 0)
        return;

    if (this->av_sync_type == AV_SYNC_VIDEO_MASTER)
    {
        VideoPicture* vp = &this->pictq[this->pictq_rindex];
        this->video_display(vp);

        this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
        this->frame_last_pts = vp->pts;
        this->pictq_size--;
        this->pictq_cond.notify_one();
    }
    else
    {
        const float threshold = 0.03f;
        if (this->pictq[pictq_rindex].pts > this->get_master_clock() + threshold)
            return; // not ready yet to show this picture

        // TODO: the conversion to RGBA is done in the decoding thread, so if a picture is skipped here, then it was
        // unnecessarily converted. But we may want to replace the conversion by a pixel shader anyway (see comment in queue_picture)
        int i=0;
        for (; i<this->pictq_size-1; ++i)
        {
            if (this->pictq[pictq_rindex].pts + threshold <= this->get_master_clock())
                this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE; // not enough time to show this picture
            else
                break;
        }

        assert (this->pictq_rindex < VIDEO_PICTURE_ARRAY_SIZE);
        VideoPicture* vp = &this->pictq[this->pictq_rindex];

        this->video_display(vp);

        this->frame_last_pts = vp->pts;

        this->pictq_size -= i;
        // update queue for next picture
        this->pictq_size--;
        this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
        this->pictq_cond.notify_one();
    }
}


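// Convert a decoded frame to RGBA with libswscale and store it in the picture
// ring buffer, waiting while the buffer is full. Returns 0 on success, -1 if
// playback is quitting or the RGBA frame could not be allocated.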
int VideoState::queue_picture(const AVFrame &pFrame, double pts)
{
    VideoPicture *vp;

    /* wait until we have a new pic */
    {
        std::unique_lock<std::mutex> lock(this->pictq_mutex);
        while(this->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !this->mQuit)
            this->pictq_cond.wait_for(lock, std::chrono::milliseconds(1));
    }
    if(this->mQuit)
        return -1;

    std::lock_guard<std::mutex> lock(this->pictq_mutex);

    // windex is set to 0 initially
    vp = &this->pictq[this->pictq_windex];

    // Convert the image into RGBA format
    // TODO: we could do this in a pixel shader instead, if the source format
    // matches a commonly used format (ie YUV420P)
    const int w = pFrame.width;
    const int h = pFrame.height;
    if(this->sws_context == nullptr || this->sws_context_w != w || this->sws_context_h != h)
    {
        if (this->sws_context != nullptr)
            sws_freeContext(this->sws_context);
        this->sws_context = sws_getContext(w, h, this->video_ctx->pix_fmt,
                                           w, h, AV_PIX_FMT_RGBA, SWS_BICUBIC,
                                           nullptr, nullptr, nullptr);
        if(this->sws_context == nullptr)
            throw std::runtime_error("Cannot initialize the conversion context!\n");
        this->sws_context_w = w;
        this->sws_context_h = h;
    }

    vp->pts = pts;
    if (vp->set_dimensions(w, h) < 0)
        return -1;

    sws_scale(this->sws_context, pFrame.data, pFrame.linesize,
              0, this->video_ctx->height, vp->rgbaFrame->data, vp->rgbaFrame->linesize);

    // now we inform our display thread that we have a pic ready
    this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
    this->pictq_size++;

    return 0;
}

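// Maintain the internal video clock: adopt the frame's pts when present,
// otherwise reuse the running clock, then advance the clock by one frame
// duration (plus extra for repeated fields). Returns the pts actually used.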
double VideoState::synchronize_video(const AVFrame &src_frame, double pts)
{
    double frame_delay;

    /* if we have pts, set video clock to it */
    if(pts != 0)
        this->video_clock = pts;
    else
        pts = this->video_clock;

    /* update the video clock */
    frame_delay = av_q2d(this->video_ctx->pkt_timebase);

    /* if we are repeating a frame, adjust clock accordingly */
    frame_delay += src_frame.repeat_pict * (frame_delay * 0.5);
    this->video_clock += frame_delay;

    return pts;
}

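// Decoder thread for the video stream: drains the video packet queue, handles
// flush packets by resetting the codec and the picture ring buffer, and pushes
// decoded frames to queue_picture() with a synchronized pts.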
class VideoThread
{
public:
    VideoThread(VideoState* self)
        : mVideoState(self)
        , mThread([this]
        {
            try
            {
                run();
            }
            catch(std::exception& e)
            {
                std::cerr << "An error occurred playing the video: " << e.what () << std::endl;
            }
        })
    {
    }

    ~VideoThread()
    {
        mThread.join();
    }

    void run()
    {
        VideoState* self = mVideoState;
        AVPacket packetData;
        av_init_packet(&packetData);
        std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);
        std::unique_ptr<AVFrame, AVFrameFree> pFrame{av_frame_alloc()};

        while(self->videoq.get(packet.get(), self) >= 0)
        {
            if(packet->data == flush_pkt.data)
            {
                avcodec_flush_buffers(self->video_ctx);

                self->pictq_mutex.lock();
                self->pictq_size = 0;
                self->pictq_rindex = 0;
                self->pictq_windex = 0;
                self->pictq_mutex.unlock();

                self->frame_last_pts = packet->pts * av_q2d((*self->video_st)->time_base);
                continue;
            }

            // Decode video frame
            int ret = avcodec_send_packet(self->video_ctx, packet.get());
            // EAGAIN is not expected
            if (ret < 0)
                throw std::runtime_error("Error decoding video frame");

            while (!ret)
            {
                ret = avcodec_receive_frame(self->video_ctx, pFrame.get());
                if (!ret)
                {
                    double pts = pFrame->best_effort_timestamp;
                    pts *= av_q2d((*self->video_st)->time_base);

                    pts = self->synchronize_video(*pFrame, pts);

                    if(self->queue_picture(*pFrame, pts) < 0)
                        break;
                }
            }
        }
    }

private:
    VideoState* mVideoState;
    std::thread mThread;
};

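// Demuxer thread: services seek requests, reads packets from the format context
// and routes them to the audio or video queue, throttling while either queue is
// full. Sets mVideoEnded once the file and all queues have been drained.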
class ParseThread
{
public:
    ParseThread(VideoState* self)
        : mVideoState(self)
        , mThread([this] { run(); })
    {
    }

    ~ParseThread()
    {
        mThread.join();
    }

    void run()
    {
        VideoState* self = mVideoState;

        AVFormatContext *pFormatCtx = self->format_ctx;
        AVPacket packetData;
        av_init_packet(&packetData);
        std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);

        try
        {
            if(!self->video_st && !self->audio_st)
                throw std::runtime_error("No streams to decode");

            // main decode loop
            while(!self->mQuit)
            {
                if(self->mSeekRequested)
                {
                    uint64_t seek_target = self->mSeekPos;
                    int streamIndex = -1;

                    int videoStreamIndex = -1;
                    int audioStreamIndex = -1;
                    if (self->video_st)
                        videoStreamIndex = self->video_st - self->format_ctx->streams;
                    if (self->audio_st)
                        audioStreamIndex = self->audio_st - self->format_ctx->streams;

                    if(videoStreamIndex >= 0)
                        streamIndex = videoStreamIndex;
                    else if(audioStreamIndex >= 0)
                        streamIndex = audioStreamIndex;

                    uint64_t timestamp = seek_target;

                    // QtCreator's highlighter doesn't like AV_TIME_BASE_Q's {} initializer for some reason
                    AVRational avTimeBaseQ = AVRational(); // = AV_TIME_BASE_Q;
                    avTimeBaseQ.num = 1;
                    avTimeBaseQ.den = AV_TIME_BASE;

                    if(streamIndex >= 0)
                        timestamp = av_rescale_q(seek_target, avTimeBaseQ, self->format_ctx->streams[streamIndex]->time_base);

                    // AVSEEK_FLAG_BACKWARD appears to be needed, otherwise ffmpeg may seek to a keyframe *after* the given time
                    // we want to seek to any keyframe *before* the given time, so we can continue decoding as normal from there on
                    if(av_seek_frame(self->format_ctx, streamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
                    {
// In FFmpeg 4.0 the "filename" field was replaced by "url"
#if LIBAVCODEC_VERSION_INT < 3805796
                        std::cerr << "Error seeking " << self->format_ctx->filename << std::endl;
#else
                        std::cerr << "Error seeking " << self->format_ctx->url << std::endl;
#endif
                    }
                    else
                    {
                        // Clear the packet queues and put a special packet with the new clock time
                        if(audioStreamIndex >= 0)
                        {
                            self->audioq.clear();
                            flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
                                self->format_ctx->streams[audioStreamIndex]->time_base);
                            self->audioq.put(&flush_pkt);
                        }
                        if(videoStreamIndex >= 0)
                        {
                            self->videoq.clear();
                            flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
                                self->format_ctx->streams[videoStreamIndex]->time_base);
                            self->videoq.put(&flush_pkt);
                        }
                        self->pictq_mutex.lock();
                        self->pictq_size = 0;
                        self->pictq_rindex = 0;
                        self->pictq_windex = 0;
                        self->pictq_mutex.unlock();
                        self->mExternalClock.set(seek_target);
                    }
                    self->mSeekRequested = false;
                }


                if((self->audio_st && self->audioq.size > MAX_AUDIOQ_SIZE) ||
                   (self->video_st && self->videoq.size > MAX_VIDEOQ_SIZE))
                {
                    std::this_thread::sleep_for(std::chrono::milliseconds(10));
                    continue;
                }

                if(av_read_frame(pFormatCtx, packet.get()) < 0)
                {
                    if (self->audioq.nb_packets == 0 && self->videoq.nb_packets == 0 && self->pictq_size == 0)
                        self->mVideoEnded = true;
                    continue;
                }
                else
                    self->mVideoEnded = false;

                // Is this a packet from the video stream?
                if(self->video_st && packet->stream_index == self->video_st-pFormatCtx->streams)
                    self->videoq.put(packet.get());
                else if(self->audio_st && packet->stream_index == self->audio_st-pFormatCtx->streams)
                    self->audioq.put(packet.get());
                else
                    av_packet_unref(packet.get());
            }
        }
        catch(std::exception& e) {
            std::cerr << "An error occurred playing the video: " << e.what () << std::endl;
        }

        self->mQuit = true;
    }

private:
    VideoState* mVideoState;
    std::thread mThread;
};


bool VideoState::update()
{
    this->video_refresh();
    return !this->mVideoEnded;
}


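// Open a decoder for the given stream index: allocate and open the codec
// context, then start either the audio decoder (via the MovieAudioFactory) or
// the VideoThread. Returns 0 on success, -1 on failure.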
int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
{
    const AVCodec *codec;

    if(stream_index < 0 || stream_index >= static_cast<int>(pFormatCtx->nb_streams))
        return -1;

    // Find a decoder for the stream's codec
    codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
    if(!codec)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch(pFormatCtx->streams[stream_index]->codecpar->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        this->audio_st = pFormatCtx->streams + stream_index;

        // Allocate a codec context for the audio stream
        this->audio_ctx = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(this->audio_ctx, pFormatCtx->streams[stream_index]->codecpar);

// This is no longer needed as of FFmpeg 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
        av_codec_set_pkt_timebase(this->audio_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif

        if (avcodec_open2(this->audio_ctx, codec, nullptr) < 0)
        {
            fprintf(stderr, "Unsupported codec!\n");
            return -1;
        }

        if (!mAudioFactory)
        {
            std::cerr << "No audio factory registered, can not play audio stream" << std::endl;
            avcodec_free_context(&this->audio_ctx);
            this->audio_st = nullptr;
            return -1;
        }

        mAudioDecoder = mAudioFactory->createDecoder(this);
        if (!mAudioDecoder.get())
        {
            std::cerr << "Failed to create audio decoder, can not play audio stream" << std::endl;
            avcodec_free_context(&this->audio_ctx);
            this->audio_st = nullptr;
            return -1;
        }
        mAudioDecoder->setupFormat();
        break;

    case AVMEDIA_TYPE_VIDEO:
        this->video_st = pFormatCtx->streams + stream_index;

        // Allocate a codec context for the video stream
        this->video_ctx = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(this->video_ctx, pFormatCtx->streams[stream_index]->codecpar);

// This is no longer needed as of FFmpeg 4.0
#if LIBAVCODEC_VERSION_INT < 3805796
        av_codec_set_pkt_timebase(this->video_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif

        if (avcodec_open2(this->video_ctx, codec, nullptr) < 0)
        {
            fprintf(stderr, "Unsupported codec!\n");
            return -1;
        }

        this->video_thread.reset(new VideoThread(this));
        break;

    default:
        break;
    }

    return 0;
}

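// Open the container from a std::istream through a custom AVIOContext, locate
// the first audio and video streams, open their decoders and start the
// ParseThread. Throws std::runtime_error on failure.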
void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::string &name)
{
    int video_index = -1;
    int audio_index = -1;
    unsigned int i;

    this->av_sync_type = AV_SYNC_DEFAULT;
    this->mQuit = false;

    this->stream = inputstream;
    if(!this->stream.get())
        throw std::runtime_error("Failed to open video resource");

    AVIOContext *ioCtx = avio_alloc_context(nullptr, 0, 0, this, istream_read, istream_write, istream_seek);
    if(!ioCtx) throw std::runtime_error("Failed to allocate AVIOContext");

    this->format_ctx = avformat_alloc_context();
    if(this->format_ctx)
        this->format_ctx->pb = ioCtx;

    // Open video file
    ///
    /// format_ctx->pb->buffer must be freed by hand,
    /// if not, valgrind will show memleak, see:
    ///
    /// https://trac.ffmpeg.org/ticket/1357
    ///
    if(!this->format_ctx || avformat_open_input(&this->format_ctx, name.c_str(), nullptr, nullptr))
    {
        if (this->format_ctx != nullptr)
        {
          if (this->format_ctx->pb != nullptr)
          {
              av_freep(&this->format_ctx->pb->buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 80, 100)
              avio_context_free(&this->format_ctx->pb);
#else
              av_freep(&this->format_ctx->pb);
#endif
          }
        }
        // "Note that a user-supplied AVFormatContext will be freed on failure."
        this->format_ctx = nullptr;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 80, 100)
        avio_context_free(&ioCtx);
#else
        av_freep(&ioCtx);
#endif
        throw std::runtime_error("Failed to open video input");
    }

    // Retrieve stream information
    if(avformat_find_stream_info(this->format_ctx, nullptr) < 0)
        throw std::runtime_error("Failed to retrieve stream information");

    // Dump information about file onto standard error
    av_dump_format(this->format_ctx, 0, name.c_str(), 0);

    for(i = 0;i < this->format_ctx->nb_streams;i++)
    {
        if(this->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
            video_index = i;
        if(this->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = i;
    }

    mExternalClock.set(0);

    if(audio_index >= 0)
        this->stream_open(audio_index, this->format_ctx);

    if(video_index >= 0)
    {
        this->stream_open(video_index, this->format_ctx);
    }


    this->parse_thread.reset(new ParseThread(this));
}

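// Tear everything down: stop the decoder threads, free the codec and scaler
// contexts, close the input (freeing the custom AVIO buffer by hand), and
// release the output texture and the RGBA picture queue.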
void VideoState::deinit()
{
    this->mQuit = true;

    this->audioq.flush();
    this->videoq.flush();

    mAudioDecoder.reset();

    if (this->parse_thread.get())
    {
        this->parse_thread.reset();
    }
    if (this->video_thread.get())
    {
        this->video_thread.reset();
    }

    if(this->audio_ctx)
        avcodec_free_context(&this->audio_ctx);
    this->audio_st = nullptr;
    this->audio_ctx = nullptr;
    if(this->video_ctx)
        avcodec_free_context(&this->video_ctx);
    this->video_st = nullptr;
    this->video_ctx = nullptr;

    if(this->sws_context)
        sws_freeContext(this->sws_context);
    this->sws_context = nullptr;

    if(this->format_ctx)
    {
        ///
        /// format_ctx->pb->buffer must be freed by hand,
        /// if not, valgrind will show memleak, see:
        ///
        /// https://trac.ffmpeg.org/ticket/1357
        ///
        if (this->format_ctx->pb != nullptr)
        {
            av_freep(&this->format_ctx->pb->buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 80, 100)
            avio_context_free(&this->format_ctx->pb);
#else
            av_freep(&this->format_ctx->pb);
#endif
        }
        avformat_close_input(&this->format_ctx);
    }

    if (mTexture)
    {
        // reset Image separately, it's pointing to *this and there might still be outside references to mTexture
        mTexture->setImage(nullptr);
        mTexture = nullptr;
    }

    // Deallocate the RGBA frame queue.
    for (std::size_t i = 0; i < VIDEO_PICTURE_ARRAY_SIZE; ++i)
        this->pictq[i].rgbaFrame = nullptr;

}

double VideoState::get_external_clock()
{
    return mExternalClock.get() / 1000000.0;
}

double VideoState::get_master_clock()
{
    if(this->av_sync_type == AV_SYNC_VIDEO_MASTER)
        return this->get_video_clock();
    if(this->av_sync_type == AV_SYNC_AUDIO_MASTER)
        return this->get_audio_clock();
    return this->get_external_clock();
}

double VideoState::get_video_clock()
{
    return this->frame_last_pts;
}

double VideoState::get_audio_clock()
{
    if (!mAudioDecoder.get())
        return 0.0;
    return mAudioDecoder->getAudioClock();
}

void VideoState::setPaused(bool isPaused)
{
    this->mPaused = isPaused;
    mExternalClock.setPaused(isPaused);
}

void VideoState::seekTo(double time)
{
    time = std::max(0.0, time);
    time = std::min(getDuration(), time);
    mSeekPos = (uint64_t) (time * AV_TIME_BASE);
    mSeekRequested = true;
}

double VideoState::getDuration()
{
    return this->format_ctx->duration / 1000000.0;
}


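// ExternalClock tracks wall-clock playback time in microseconds (av_gettime),
// and can be paused/resumed or reset to a new position after a seek.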
ExternalClock::ExternalClock()
    : mTimeBase(av_gettime())
    , mPausedAt(0)
    , mPaused(false)
{
}

void ExternalClock::setPaused(bool paused)
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (mPaused == paused)
        return;
    if (paused)
    {
        mPausedAt = av_gettime() - mTimeBase;
    }
    else
        mTimeBase = av_gettime() - mPausedAt;
    mPaused = paused;
}

uint64_t ExternalClock::get()
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (mPaused)
        return mPausedAt;
    else
        return av_gettime() - mTimeBase;
}

void ExternalClock::set(uint64_t time)
{
    std::lock_guard<std::mutex> lock(mMutex);
    mTimeBase = av_gettime() - time;
    mPausedAt = time;
}

}
