1 // MediaParser.h: Base class for media parsers
2 //
3 //   Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016
4 //   Free Software Foundation, Inc.
5 //
6 // This program is free software; you can redistribute it and/or modify
7 // it under the terms of the GNU General Public License as published by
8 // the Free Software Foundation; either version 3 of the License, or
9 // (at your option) any later version.
10 //
11 // This program is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 // GNU General Public License for more details.
15 //
16 // You should have received a copy of the GNU General Public License
17 // along with this program; if not, write to the Free Software
18 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19 
20 #ifndef GNASH_MEDIAPARSER_H
21 #define GNASH_MEDIAPARSER_H
22 
23 #include <atomic>
24 #include <thread>
25 #include <mutex>
26 #include <condition_variable>
27 #include <memory>
28 #include <deque>
29 #include <map>
30 #include <vector>
31 #include <iostream> // for output operator
32 #include <boost/optional.hpp>
33 
34 #include "IOChannel.h" // for inlines
35 #include "dsodefs.h" // DSOEXPORT
36 
37 // Undefine this to load/parse media files in main thread
38 #define LOAD_MEDIA_IN_A_SEPARATE_THREAD 1
39 
40 namespace gnash {
41     class SimpleBuffer;
42     namespace media {
43         struct Id3Info;
44     }
45 }
46 
47 namespace gnash {
48 namespace media {
49 
50 
/// Video frame types (as found in FLV video tag headers)
enum videoFrameType
{
	/// Key frames: self-contained; a decoder can start from one of these
	KEY_FRAME = 1,

	/// Inter frames (original comment said "interlaced"; the FLV spec
	/// calls frame type 2 an inter/predicted frame — TODO confirm)
	INTER_FRAME = 2,

	/// Disposable inter frames (droppable without breaking later frames)
	DIS_INTER_FRAME = 3
};
63 
/// The type of the codec id passed in the AudioInfo or VideoInfo class
//
/// Tells whether the numeric codec id should be interpreted as one of
/// the flash-defined enumerations below, or as an opaque, media-handler
/// specific value.
enum codecType
{
	/// The internal flash codec ids
	CODEC_TYPE_FLASH,

	/// Custom codecs ids
	CODEC_TYPE_CUSTOM
};
73 
/// Video codec ids as defined in flash
enum videoCodecType
{
	/// No video codec
	NO_VIDEO_CODEC = 0,

	/// H263/SVQ3 video codec (Sorenson variant of H.263)
	VIDEO_CODEC_H263 = 2,

	/// Screenvideo codec
	VIDEO_CODEC_SCREENVIDEO = 3,

	/// On2 VP6 video codec
	VIDEO_CODEC_VP6 = 4,

	/// On2 VP6 Alpha video codec (VP6 with an alpha channel)
	VIDEO_CODEC_VP6A = 5,

	/// Screenvideo2 codec
	VIDEO_CODEC_SCREENVIDEO2 = 6,

	/// MPEG-4 Part 10, or Advanced Video Coding
	VIDEO_CODEC_H264 = 7

	// NOTE: if you add more elements here remember to
	//       also add them to the output operator!
};
101 
102 DSOEXPORT std::ostream& operator<< (std::ostream& os, const videoCodecType& t);
103 
/// Audio codec ids as defined in flash
//
/// For some encodings, audio data is organized
/// in logical frames. The structure of such frames
/// (header/payload) is codec dependent.
/// The actual size of each frame may not be known
/// w/out parsing the encoded stream, as it
/// might be specified in the header of each frame.
///
/// Other encodings are looser on frames. For these
/// you can define a frame any way you want, as long
/// as a frame doesn't contain partial samples.
///
/// For FFMPEG, you can NOT construct a parser for the
/// loosely-framed codecs.
///
/// Parser-needing codecs will be documented as such.
///
enum audioCodecType
{
	/// Signed Linear PCM, unspecified byte order
	//
	/// Use of this codec is deprecated (but still supported) due to
	/// the unspecified byte order (you can only play >8bit samples
	/// in a sane way when the endianness of encoding and decoding
	/// hosts match).
	///
	/// 90% of the time the actual encoder did run on windows, so
	/// it is a good bet to guess for little-endian.
	/// SampleSize may be 8 or 16 bits.
	///
	AUDIO_CODEC_RAW = 0,

	/// ADPCM format
	//
	/// SWF supports 2, 3, 4, and 5 bits / sample.
	/// ADPCM "frames" consist of 4096 ADPCM codes per channel.
	///
	/// For streaming there is no concept of "seekSamples" like
	/// MP3 streaming implements. Thus ADPCM is suboptimal for
	/// streaming as it is difficult to match sound frames with
	/// movie frames.
	/// Uncompressed SampleSize is always 16 bit.
	///
	AUDIO_CODEC_ADPCM = 1,

	/// MP3 format
	//
	/// MP3 is supported for SWF4 and later.
	/// MP3 sound is structured in frames consisting of a fixed sized
	/// header (32Bit) and compressed sound data. Compressed sound
	/// data always contains a fixed number of sound samples (576 or 1152).
	/// For streaming sound an additional field is necessary (seekSamples)
	/// to keep track of samples exceeding movie frame border.
	///
	/// MP3 header contains all necessary information to decode a single
	/// frame. From this information one can derive the number of samples
	/// and the frame's size.
	/// Uncompressed SampleSize is always 16 bit.
	///
	AUDIO_CODEC_MP3 = 2,

	/// Linear PCM, strictly little-endian
	AUDIO_CODEC_UNCOMPRESSED = 3,

	/// Proprietary simple format, 8 kHz mono variant per the enum name
	/// (original note said "Always 5Khz mono ?" — TODO confirm vs FLV spec)
	//
	/// SWF6 and later.
	/// Data is organized in frames of 256 samples.
	///
	AUDIO_CODEC_NELLYMOSER_8HZ_MONO = 5,

	/// Proprietary simple format
	//
	/// SWF6 and later.
	/// Data is organized in frames of 256 samples.
	///
	AUDIO_CODEC_NELLYMOSER = 6,

	/// Advanced Audio Coding
	AUDIO_CODEC_AAC = 10,

	/// Always 16kHz mono
	AUDIO_CODEC_SPEEX = 11

	// NOTE: if you add more elements here remember to
	//       also add them to the output operator!
};
192 
193 inline std::ostream&
194 operator<< (std::ostream& os, const audioCodecType& t)
195 {
196     switch (t)
197     {
198         case AUDIO_CODEC_RAW:
199             os << "Raw";
200             break;
201         case AUDIO_CODEC_ADPCM:
202             os << "ADPCM";
203             break;
204         case AUDIO_CODEC_MP3:
205             os << "MP3";
206             break;
207         case AUDIO_CODEC_UNCOMPRESSED:
208             os << "Uncompressed";
209             break;
210         case AUDIO_CODEC_NELLYMOSER_8HZ_MONO:
211             os << "Nellymoser 8Hz mono";
212             break;
213         case AUDIO_CODEC_NELLYMOSER:
214             os << "Nellymoser";
215             break;
216         case AUDIO_CODEC_AAC:
217             os << "Advanced Audio Coding";
218             break;
219         case AUDIO_CODEC_SPEEX:
220             os << "Speex";
221             break;
222         default:
223             os << "unknown/invalid codec " << static_cast<int>(t);
224             break;
225     }
226     return os;
227 }
228 
229 /// Information about an audio stream
230 //
231 /// The information stored is codec-id,
232 /// samplerate, samplesize, stereo, duration and codec-type.
233 ///
234 /// Additionally, an abstract ExtraInfo can be hold.
235 ///
236 class AudioInfo
237 {
238 
239 public:
240 
241     /// Construct an AudioInfo object
242     //
243     /// @param codeci
244     ///     Audio codec id.
245     ///     To be interpreted as a media::audioCodecType if the typei
246     ///     parameter is CODEC_TYPE_FLASH; otherwise it's an opaque number to use
247     ///     for codec information transfer between a MediaParser and a
248     ///     AudioDecoder from the same %media handler module.
249     ///
250     /// @param sampleRatei
251     ///     Nominal sample rate.
252     ///     @todo document units.
253     ///
254     /// @param sampleSizei
255     ///     Sample size, in bytes.
256     ///
257     /// @param stereoi
258     ///     Sample type (stereo if true, mono otherwise).
259     ///     @todo document if and how intepretation of sampleSizei changes
260     ///
261     /// @param durationi
262     ///     Nominal audio stream duration, in milliseconds.
263     ///
264     /// @param typei
265     ///     Changes interpretation of the codeci parameter.
266     ///
AudioInfo(int codeci,std::uint16_t sampleRatei,std::uint16_t sampleSizei,bool stereoi,std::uint64_t durationi,codecType typei)267 	AudioInfo(int codeci, std::uint16_t sampleRatei,
268             std::uint16_t sampleSizei, bool stereoi,
269             std::uint64_t durationi, codecType typei)
270 		:
271         codec(codeci),
272 		sampleRate(sampleRatei),
273 		sampleSize(sampleSizei),
274 		stereo(stereoi),
275 		duration(durationi),
276 		type(typei)
277 		{
278 		}
279 
280 	/// Codec identifier
281 	//
282 	/// This has to be interpreted as audioCodecType if codecType type is CODEC_TYPE_FLASH
283 	/// or interpretation is opaque and we rely on the assumption that the AudioInfo
284 	/// creator and the AudioInfo user have a way to get a shared interpretation
285 	///
286 	int codec;
287 
288 	std::uint16_t sampleRate;
289 
290 	/// Size of each sample, in bytes
291 	std::uint16_t sampleSize;
292 
293 	bool stereo;
294 
295 	std::uint64_t duration;
296 
297 	codecType type;
298 
299 	/// Extra info about an audio stream
300     //
301 	/// Abstract class to hold any additional info
302 	/// when required for proper decoder initialization.
303     ///
304 	class ExtraInfo {
305 	public:
~ExtraInfo()306 		virtual ~ExtraInfo() {}
307 	};
308 
309 	/// Extra info about audio stream, if when needed
310     //
311     /// Could be ExtraVideoInfoFlv or a media-handler specific info
312     ///
313 	std::unique_ptr<ExtraInfo> extra;
314 };
315 
316 /// Information about a video stream
317 //
318 /// The information stored is codec-id, width, height, framerate and duration.
319 ///
320 /// Additionally, an abstract ExtraInfo can be hold.
321 ///
322 class VideoInfo
323 {
324 public:
325 
326     /// Construct a VideoInfo object
327     //
328     /// @param codeci
329     ///     Video codec id.
330     ///     To be interpreted as a media::videoCodecType if the typei
331     ///     parameter is CODEC_TYPE_FLASH; otherwise it's an opaque number to use
332     ///     for codec information transfer between a MediaParser and a
333     ///     VideoDecoder from the same %media handler module.
334     ///
335     /// @param widthi
336     ///     Video frame width.
337     ///     @todo check if still needed.
338     ///
339     /// @param heighti
340     ///     Video frame height.
341     ///     @todo check if still needed.
342     ///
343     /// @param frameRatei
344     ///     Nominal video frame rate.
345     ///     @todo document units.
346     ///
347     /// @param durationi
348     ///     Nominal video duration.
349     ///     @todo check if still needed, if so document units!
350     ///
351     /// @param typei
352     ///     Changes interpretation of the codeci parameter.
353     ///
VideoInfo(int codeci,std::uint16_t widthi,std::uint16_t heighti,std::uint16_t frameRatei,std::uint64_t durationi,codecType typei)354 	VideoInfo(int codeci, std::uint16_t widthi, std::uint16_t heighti,
355             std::uint16_t frameRatei, std::uint64_t durationi,
356             codecType typei)
357 		:
358         codec(codeci),
359 		width(widthi),
360 		height(heighti),
361 		frameRate(frameRatei),
362 		duration(durationi),
363 		type(typei)
364 	{
365 	}
366 
367 	int codec;
368 	std::uint16_t width;
369 	std::uint16_t height;
370 	std::uint16_t frameRate;
371 	std::uint64_t duration;
372 	codecType type;
373 
374 	/// Extra info about a video stream
375     //
376 	/// Abstract class to hold any additional info
377 	/// when required for proper decoder initialization
378     ///
379 	class ExtraInfo {
380 	public:
~ExtraInfo()381 		virtual ~ExtraInfo() {}
382 	};
383 
384 	/// Extra info about video stream, if when needed
385     //
386     /// Could be ExtraAudioInfoFlv or a media-handler specific info
387     ///
388 	std::unique_ptr<ExtraInfo> extra;
389 };
390 
391 DSOEXPORT std::ostream& operator << (std::ostream& os, const VideoInfo& vi);
392 
393 
/// Opaque base class for codec-specific extra data attached to a frame.
//
/// The virtual destructor allows subclasses to be deleted through a
/// base-class pointer (frames hold a unique_ptr<EncodedExtraData>).
class EncodedExtraData {

public:
	virtual ~EncodedExtraData() = default;

};
400 
401 /// An encoded video frame
402 class EncodedVideoFrame
403 {
404 public:
405 
406 	/// Create an encoded video frame
407 	//
408 	/// @param data
409 	///     Data buffer, ownership transferred
410 	///
411 	/// @param size
412 	///     Size of the data buffer
413 	///
414 	/// @param frameNum
415 	///     Frame number.
416 	///
417 	/// @param timestamp
418 	///     Presentation timestamp, in milliseconds.
419 	///
420 	EncodedVideoFrame(std::uint8_t* data, std::uint32_t size,
421 			unsigned int frameNum,
422 			std::uint64_t timestamp=0)
423 		:
_size(size)424 		_size(size),
425 		_data(data),
426 		_frameNum(frameNum),
427 		_timestamp(timestamp)
428 	{}
429 
430 	/// Return pointer to actual data. Ownership retained by this class.
data()431 	const std::uint8_t* data() const { return _data.get(); }
432 
433 	/// Return size of data buffer.
dataSize()434 	std::uint32_t dataSize() const { return _size; }
435 
436 	/// Return video frame presentation timestamp
timestamp()437 	std::uint64_t timestamp() const { return _timestamp; }
438 
439 	/// Return video frame number
frameNum()440 	unsigned frameNum() const { return _frameNum; }
441 
442 	// FIXME: should have better encapsulation for this sort of stuff.
443 	std::unique_ptr<EncodedExtraData> extradata;
444 private:
445 
446 	std::uint32_t _size;
447 	std::unique_ptr<std::uint8_t[]> _data;
448 	unsigned int _frameNum;
449 	std::uint64_t _timestamp;
450 };
451 
/// An encoded audio frame
//
/// Plain aggregate with public members; the data buffer is owned by
/// this object through the unique_ptr.
class EncodedAudioFrame
{
public:
	/// Size of the data buffer, in bytes
	std::uint32_t dataSize;

	/// Encoded audio data, owned by this frame
	std::unique_ptr<std::uint8_t[]> data;

	/// Presentation timestamp — presumably in milliseconds, matching
	/// EncodedVideoFrame; TODO confirm against parser implementations
	std::uint64_t timestamp;

	// FIXME: should have better encapsulation for this sort of stuff.
	std::unique_ptr<EncodedExtraData> extradata;
};
463 
/// The MediaParser class provides cursor-based access to encoded %media frames
//
/// Cursor-based access allows seeking as close as possible to a specified time
/// and fetching frames from there on, sequentially.
/// See seek(), nextVideoFrame(), nextAudioFrame()
///
/// Input is received from an IOChannel object.
///
class DSOEXPORT MediaParser
{
public:

    /// A container for executable MetaTags contained in media streams.
    //
    /// Presently only known in FLV.
    typedef std::multimap<std::uint64_t, std::shared_ptr<SimpleBuffer> >
        MetaTags;

    typedef std::vector<MetaTags::mapped_type> OrderedMetaTags;

    /// Construct a MediaParser reading input from the given stream.
    //
    /// Ownership of the stream is transferred to this object.
    ///
        MediaParser(std::unique_ptr<IOChannel> stream);

	// Classes with virtual methods (virtual classes)
	// must have a virtual destructor, or the destructors
	// of subclasses will never be invoked, typically resulting
	// in memory leaks..
	//
	virtual ~MediaParser();

	/// \brief
	/// Seeks to the closest possible position to the given position,
	/// and returns the new position.
	//
	///
	/// @param time input/output parameter, input requests a time, output
	///        returns the actual time seeked to.
	///
	/// @return true if the seek was valid, false otherwise.
	///
	virtual bool seek(std::uint32_t& time)=0;

	/// Returns minimum length of available buffers in milliseconds
	//
	/// TODO: FIXME: NOTE: this is currently used by NetStream.bufferLength
	/// but is bogus as it doesn't take the *current* playhead cursor time
	/// into account. A proper way would be having a getLastBufferTime()
	/// interface here, returning minimum timestamp of last available
	/// frames and let NetStream::bufferLength() use that with playhead
	/// time to find out...
	///
	DSOEXPORT std::uint64_t getBufferLength() const;

	/// Return true if both audio and video buffers are empty
	//
	/// NOTE: locks _qMutex
	DSOEXPORT bool isBufferEmpty() const;

	/// Return the time (milliseconds) we want the parser thread to
	/// maintain in the buffer
	DSOEXPORT std::uint_fast64_t getBufferTime() const
	{
		return _bufferTime.load();
	}

	/// Set the time we want the parser thread to maintain in the buffer
	//
	/// @param t
	///	Number of milliseconds to keep in the buffers.
	///
	DSOEXPORT void setBufferTime(std::uint_fast64_t t)
	{
		_bufferTime=t;
	}

	/// Get timestamp of the next frame available, if any
	//
	/// @param ts will be set to timestamp of next available frame
	/// @return false if no frame is available yet
	///
	/// NOTE: locks _qMutex
	///
	DSOEXPORT bool nextFrameTimestamp(std::uint64_t& ts) const;

	/// Get timestamp of the video frame which would be returned on nextVideoFrame
	//
	/// @return false if there is no video frame left
	///         (either none or no more)
	///
	/// NOTE: locks _qMutex
	///
	DSOEXPORT bool nextVideoFrameTimestamp(std::uint64_t& ts) const;

	/// Returns the next video frame in the parsed buffer, advancing video cursor.
	//
	/// If no frame has been played before the first frame is returned.
	/// If there are no more frames in the parsed buffer NULL is returned.
	/// You can check with parsingCompleted() to know whether this is due to
	/// EOF being reached.
	///
	DSOEXPORT std::unique_ptr<EncodedVideoFrame> nextVideoFrame();

	/// Get timestamp of the audio frame which would be returned on nextAudioFrame
	//
	/// @return false if there is no audio frame left
	///         (either none or no more)
	///
	/// NOTE: locks _qMutex
	///
	DSOEXPORT bool nextAudioFrameTimestamp(std::uint64_t& ts) const;

	/// Returns the next audio frame in the parsed buffer, advancing audio cursor.
	//
	/// If no frame has been played before the first frame is returned.
	/// If there are no more frames in the parsed buffer NULL is returned.
	/// You can check with parsingCompleted() to know whether this is due to
	/// EOF being reached.
	///
	DSOEXPORT std::unique_ptr<EncodedAudioFrame> nextAudioFrame();

	/// Returns a VideoInfo class about the videostream
	//
	/// @return a VideoInfo class about the videostream,
	///         or zero if unknown (no video or not enough data parsed yet).
	///
	VideoInfo* getVideoInfo() { return _videoInfo.get(); }

	/// Returns an AudioInfo class about the audiostream
	//
	/// @return an AudioInfo class about the audiostream,
	///         or zero if unknown (no audio or not enough data parsed yet).
	///
	AudioInfo* getAudioInfo() { return _audioInfo.get(); }

	/// Return true if parsing is completed
	//
	/// If this function returns true, any call to nextVideoFrame()
	/// or nextAudioFrame() will always return NULL
	///
	/// TODO: make thread-safe
	///
	bool parsingCompleted() const { return _parsingComplete; }

	/// Return true if indexing is completed
	//
	/// If this function returns false, parseNextChunk will
	/// be called even when buffers are full. Parsers
	/// supporting indexing separated from parsing should
	/// override this method and have parseNextChunk figure
	/// if they only need to index or to parse based on bufferFull.
	///
	virtual bool indexingCompleted() const { return true; }

	/// Return number of bytes parsed so far
	virtual std::uint64_t getBytesLoaded() const { return 0; }

	/// Return total number of bytes in input
	std::uint64_t getBytesTotal() const
	{
		return _stream->size();
	}

	/// Parse next chunk of input
	//
	/// The implementations are required to parse a small chunk
	/// of input, so to avoid blocking too much if parsing conditions
	/// change (ie: seek or destruction requested)
	///
	/// When LOAD_MEDIA_IN_A_SEPARATE_THREAD is defined, this should
	/// never be called by users (consider protected).
	///
	virtual bool parseNextChunk()=0;

    /// Retrieve any parsed metadata tags up to a specified timestamp.
    //
    /// @param ts   The latest timestamp to retrieve metadata for.
    /// @param tags This is filled with shared pointers to metatags in
    ///             timestamp order. Ownership of the data is shared. It
    ///             is destroyed automatically along with the last owner.
    //
    /// Metadata is currently only parsed from FLV streams. The default
    /// is a no-op.
    virtual void fetchMetaTags(OrderedMetaTags& tags, std::uint64_t ts);

    /// Get ID3 data from the parsed stream if it exists.
    //
    /// It's best to do this only when parsingComplete is true.
    virtual boost::optional<Id3Info> getId3Info() const;

protected:

	/// Subclasses *must* set the following variables: @{

	/// Info about the video stream (if any)
	std::unique_ptr<VideoInfo> _videoInfo;

	/// Info about the audio stream (if any)
	std::unique_ptr<AudioInfo> _audioInfo;

	/// Whether the parsing is complete or not
	bool _parsingComplete;

	/// Number of bytes loaded
	std::atomic<std::uint_fast64_t> _bytesLoaded;

	/// @}

	/// Start the parser thread
	void startParserThread();

	/// Stop the parser thread
	//
	/// This method should be always called
	/// by destructors of subclasses to ensure
	/// the parser thread won't attempt to access
	/// destroyed structures.
	///
	void stopParserThread();

	/// Clear the a/v buffers
	void clearBuffers();

	/// Push an encoded audio frame to buffer.
	//
	/// Will wait on a condition if buffer is full or parsing was completed
	///
	void pushEncodedAudioFrame(std::unique_ptr<EncodedAudioFrame> frame);

	/// Push an encoded video frame to buffer.
	//
	/// Will wait on a condition if buffer is full or parsing was completed
	///
	void pushEncodedVideoFrame(std::unique_ptr<EncodedVideoFrame> frame);

	/// The stream used to access the file
	std::unique_ptr<IOChannel> _stream;

	/// Mutex serializing access to _stream (and, per the comment on
	/// _seekRequest below, held while setting that flag)
	mutable std::mutex _streamMutex;

	/// The parser loop runs in a separate thread
	/// and calls parseNextChunk until killed.
	///
	/// parseNextChunk is expected to push encoded frames
	/// on the queue, which may trigger the thread to be
	/// put to sleep when queues are full or parsing
	/// was completed.
	///
	void parserLoop();

	/// Return true if the parser thread was asked to terminate
	bool parserThreadKillRequested() const
	{
		return _parserThreadKillRequested.load();
	}

	/// Milliseconds of media the parser thread tries to keep buffered
        std::atomic<std::uint_fast64_t> _bufferTime;

	/// Thread running parserLoop(), see startParserThread()
	std::thread _parserThread;

	/// Set by requestParserThreadKill(), read by parserThreadKillRequested()
	std::atomic<bool> _parserThreadKillRequested;

	/// Signalled to wake the parser thread, see wakeupParserThread()
	std::condition_variable _parserThreadWakeup;

	/// Wait on the _parserThreadWakeup condition if buffer is full
	/// or parsing was completed.
	///
	/// Callers *must* pass a locked lock on _qMutex
	///
	void waitIfNeeded(std::unique_lock<std::mutex>& qMutexLock);

	void wakeupParserThread();

	/// mutex protecting access to the a/v encoded frames queues
	mutable std::mutex _qMutex;

	/// Method to check if buffer is full w/out locking the _qMutex
	//
	///
	/// This is intended for being called by waitIfNeeded, which
	/// is passed a locked lock on _qMutex, and by parseNextChunk
	/// to determine whether to index-only or also push on queue.
	///
	bool bufferFull() const;

	/// On seek, this flag will be set, while holding a lock on _streamMutex.
	/// The parser, when obtained a lock on _streamMutex, will check this
	/// flag, if found to be true will clear the buffers and reset to false.
	bool _seekRequest;

private:

	typedef std::deque<std::unique_ptr<EncodedVideoFrame>> VideoFrames;
	typedef std::deque<std::unique_ptr<EncodedAudioFrame>> AudioFrames;

	/// Return pointer to next encoded video frame in buffer
	//
	/// If no video is present, or queue is empty, 0 is returned
	///
	/// NOTE: Caller is expected to hold a lock on _qMutex
	///
	const EncodedVideoFrame* peekNextVideoFrame() const;

	/// Return pointer to next encoded audio frame in buffer
	//
	/// If no audio is present, or queue is empty, 0 is returned
	///
	/// NOTE: Caller is expected to hold a lock on _qMutex
	///
	const EncodedAudioFrame* peekNextAudioFrame() const;


	/// Queue of video frames (the video buffer)
	//
	/// Elements owned by this class.
	///
	VideoFrames _videoFrames;

	/// Queue of audio frames (the audio buffer)
	//
	/// Elements owned by this class.
	///
	AudioFrames _audioFrames;

	/// Ask the parser thread to terminate, waking it up so it notices
	void requestParserThreadKill()
	{
		_parserThreadKillRequested=true;
		_parserThreadWakeup.notify_all();
	}

	/// Return diff between timestamp of last and first audio frame
	std::uint64_t audioBufferLength() const;

	/// Return diff between timestamp of last and first video frame
	std::uint64_t videoBufferLength() const;

	/// A getBufferLength method not locking the _qMutex (expected to be locked by caller already).
	std::uint64_t getBufferLengthNoLock() const;

};
797 
798 
799 } // gnash.media namespace
800 } // namespace gnash
801 
#endif // GNASH_MEDIAPARSER_H
803