// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/formats/mp4/mp4_stream_parser.h"

#include <stddef.h>

#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/numerics/math_constants.h"
#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/encryption_pattern.h"
#include "media/base/encryption_scheme.h"
#include "media/base/media_tracks.h"
#include "media/base/media_util.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
#include "media/formats/mp4/es_descriptor.h"
#include "media/formats/mp4/rcheck.h"
#include "media/formats/mpeg/adts_constants.h"

namespace media {
namespace mp4 {

namespace {

const int kMaxEmptySampleLogs = 20;
const int kMaxInvalidConversionLogs = 20;
const int kMaxVideoKeyframeMismatchLogs = 10;

// Caller should be prepared to handle return of EncryptionScheme::kUnencrypted
// in case of unsupported scheme.
EncryptionScheme GetEncryptionScheme(const ProtectionSchemeInfo& sinf) {
  if (!sinf.HasSupportedScheme())
    return EncryptionScheme::kUnencrypted;
  FourCC fourcc = sinf.type.type;
  switch (fourcc) {
    case FOURCC_CENC:
      return EncryptionScheme::kCenc;
    case FOURCC_CBCS:
      return EncryptionScheme::kCbcs;
    default:
      NOTREACHED();
      break;
  }
  return EncryptionScheme::kUnencrypted;
}

VideoColorSpace ConvertColorParameterInformationToColorSpace(
    const ColorParameterInformation& info) {
  auto primary_id =
      static_cast<VideoColorSpace::PrimaryID>(info.colour_primaries);
  auto transfer_id =
      static_cast<VideoColorSpace::TransferID>(info.transfer_characteristics);
  auto matrix_id =
      static_cast<VideoColorSpace::MatrixID>(info.matrix_coefficients);

  // Note that we don't check whether the embedded ids are valid.  We rely on
  // the underlying video decoder to reject any ids that it doesn't support.
  return VideoColorSpace(primary_id, transfer_id, matrix_id,
                         info.full_range ? gfx::ColorSpace::RangeID::FULL
                                         : gfx::ColorSpace::RangeID::LIMITED);
}

MasteringMetadata ConvertMdcvToMasteringMetadata(
    const MasteringDisplayColorVolume& mdcv) {
  MasteringMetadata mastering_metadata;

  mastering_metadata.primary_r = MasteringMetadata::Chromaticity(
      mdcv.display_primaries_rx, mdcv.display_primaries_ry);
  mastering_metadata.primary_g = MasteringMetadata::Chromaticity(
      mdcv.display_primaries_gx, mdcv.display_primaries_gy);
  mastering_metadata.primary_b = MasteringMetadata::Chromaticity(
      mdcv.display_primaries_bx, mdcv.display_primaries_by);
  mastering_metadata.white_point =
      MasteringMetadata::Chromaticity(mdcv.white_point_x, mdcv.white_point_y);

  mastering_metadata.luminance_max =
      static_cast<float>(mdcv.max_display_mastering_luminance);
  mastering_metadata.luminance_min =
      static_cast<float>(mdcv.min_display_mastering_luminance);

  return mastering_metadata;
}

}  // namespace

MP4StreamParser::MP4StreamParser(const std::set<int>& audio_object_types,
                                 bool has_sbr,
                                 bool has_flac)
    : state_(kWaitingForInit),
      moof_head_(0),
      mdat_tail_(0),
      highest_end_offset_(0),
      has_audio_(false),
      has_video_(false),
      audio_object_types_(audio_object_types),
      has_sbr_(has_sbr),
      has_flac_(has_flac),
      num_empty_samples_skipped_(0),
      num_invalid_conversions_(0),
      num_video_keyframe_mismatches_(0) {}

MP4StreamParser::~MP4StreamParser() = default;

void MP4StreamParser::Init(
    InitCB init_cb,
    const NewConfigCB& config_cb,
    const NewBuffersCB& new_buffers_cb,
    bool /* ignore_text_tracks */,
    const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
    const NewMediaSegmentCB& new_segment_cb,
    const EndMediaSegmentCB& end_of_segment_cb,
    MediaLog* media_log) {
  DCHECK_EQ(state_, kWaitingForInit);
  DCHECK(!init_cb_);
  DCHECK(init_cb);
  DCHECK(config_cb);
  DCHECK(new_buffers_cb);
  DCHECK(encrypted_media_init_data_cb);
  DCHECK(new_segment_cb);
  DCHECK(end_of_segment_cb);

  ChangeState(kParsingBoxes);
  init_cb_ = std::move(init_cb);
  config_cb_ = config_cb;
  new_buffers_cb_ = new_buffers_cb;
  encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
  new_segment_cb_ = new_segment_cb;
  end_of_segment_cb_ = end_of_segment_cb;
  media_log_ = media_log;
}

void MP4StreamParser::Reset() {
  queue_.Reset();
  runs_.reset();
  moof_head_ = 0;
  mdat_tail_ = 0;
}

void MP4StreamParser::Flush() {
  DCHECK_NE(state_, kWaitingForInit);
  Reset();
  ChangeState(kParsingBoxes);
}

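// MP4 containers carry explicit per-sample timestamps, so this parser never
// asks the pipeline to generate timestamps on its behalf.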
bool MP4StreamParser::GetGenerateTimestampsFlag() const {
  return false;
}

bool MP4StreamParser::Parse(const uint8_t* buf, int size) {
  DCHECK_NE(state_, kWaitingForInit);

  if (state_ == kError)
    return false;

  queue_.Push(buf, size);

  BufferQueueMap buffers;

  // TODO(sandersd): Remove these bools. ParseResult replaced their purpose, but
  // this method needs to be refactored to complete that work.
  bool result = false;
  bool err = false;

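  // Drive the parser state machine until it either stalls waiting for more
  // data (result is false) or hits an error.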
  do {
    switch (state_) {
      case kWaitingForInit:
      case kError:
        NOTREACHED();
        return false;

      case kParsingBoxes: {
        ParseResult pr = ParseBox();
        result = pr == ParseResult::kOk;
        err = pr == ParseResult::kError;
        break;
      }

      case kWaitingForSampleData:
        result = HaveEnoughDataToEnqueueSamples();
        if (result)
          ChangeState(kEmittingSamples);
        break;

      case kEmittingSamples: {
        ParseResult pr = EnqueueSample(&buffers);
        result = pr == ParseResult::kOk;
        err = pr == ParseResult::kError;
        if (result) {
          int64_t max_clear = runs_->GetMaxClearOffset() + moof_head_;
          err = !ReadAndDiscardMDATsUntil(max_clear);
        }
        break;
      }
    }
  } while (result && !err);

  if (!err)
    err = !SendAndFlushSamples(&buffers);

  if (err) {
    DLOG(ERROR) << "Error while parsing MP4";
    moov_.reset();
    Reset();
    ChangeState(kError);
    return false;
  }

  return true;
}

ParseResult MP4StreamParser::ParseBox() {
  const uint8_t* buf;
  int size;
  queue_.Peek(&buf, &size);
  if (!size)
    return ParseResult::kNeedMoreData;

  std::unique_ptr<BoxReader> reader;
  ParseResult result =
      BoxReader::ReadTopLevelBox(buf, size, media_log_, &reader);
  if (result != ParseResult::kOk)
    return result;

  DCHECK(reader);
  if (reader->type() == FOURCC_MOOV) {
    if (!ParseMoov(reader.get()))
      return ParseResult::kError;
  } else if (reader->type() == FOURCC_MOOF) {
    moof_head_ = queue_.head();
    if (!ParseMoof(reader.get()))
      return ParseResult::kError;

    // Set up the first mdat offset for ReadAndDiscardMDATsUntil().
    mdat_tail_ = queue_.head() + reader->box_size();

    // Return early to avoid evicting 'moof' data from queue. Auxiliary info may
    // be located anywhere in the file, including inside the 'moof' itself.
    // (Since 'default-base-is-moof' is mandated, no data references can come
    // before the head of the 'moof', so keeping this box around is sufficient.)
    return ParseResult::kOk;
  } else {
    // TODO(wolenetz,chcunningham): Enforce more strict adherence to MSE byte
    // stream spec for ftyp and styp. See http://crbug.com/504514.
    DVLOG(2) << "Skipping unrecognized top-level box: "
             << FourCCToString(reader->type());
  }

  queue_.Pop(reader->box_size());
  return ParseResult::kOk;
}

VideoTransformation MP4StreamParser::CalculateRotation(
    const TrackHeader& track,
    const MovieHeader& movie) {
  static_assert(kDisplayMatrixDimension == 9, "Display matrix must be 3x3");
  // 3x3 matrix: [ a b c ]
  //             [ d e f ]
  //             [ x y z ]
  int32_t rotation_matrix[kDisplayMatrixDimension] = {0};

  // Shift values for fixed point multiplications.
  const int32_t shifts[kDisplayMatrixHeight] = {16, 16, 30};

  // Matrix multiplication for
  // track.display_matrix * movie.display_matrix
  // with special consideration taken that entries a-f are 16.16 fixed point
  // decimals and x-z are 2.30 fixed point decimals.
  for (int i = 0; i < kDisplayMatrixWidth; i++) {
    for (int j = 0; j < kDisplayMatrixHeight; j++) {
      for (int e = 0; e < kDisplayMatrixHeight; e++) {
        rotation_matrix[i * kDisplayMatrixHeight + j] +=
            ((int64_t)track.display_matrix[i * kDisplayMatrixHeight + e] *
             movie.display_matrix[e * kDisplayMatrixHeight + j]) >>
            shifts[e];
      }
    }
  }

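  // Only the top-left 2x2 of the combined matrix (entries a, b, d, e) encodes
  // the rotation/flip; the remaining entries are ignored here.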
  int32_t rotation_only[4] = {rotation_matrix[0], rotation_matrix[1],
                              rotation_matrix[3], rotation_matrix[4]};
  return VideoTransformation(rotation_only);
}

bool MP4StreamParser::ParseMoov(BoxReader* reader) {
  moov_.reset(new Movie);
  RCHECK(moov_->Parse(reader));
  runs_.reset();
  audio_track_ids_.clear();
  video_track_ids_.clear();

  has_audio_ = false;
  has_video_ = false;

  std::unique_ptr<MediaTracks> media_tracks(new MediaTracks());
  AudioDecoderConfig audio_config;
  VideoDecoderConfig video_config;
  int detected_audio_track_count = 0;
  int detected_video_track_count = 0;
  int detected_text_track_count = 0;

  for (std::vector<Track>::const_iterator track = moov_->tracks.begin();
       track != moov_->tracks.end(); ++track) {
    const SampleDescription& samp_descr =
        track->media.information.sample_table.description;

    // TODO(wolenetz): When codec reconfigurations are supported, detect and
    // send a codec reconfiguration for fragments using a sample description
    // index different from the previous one. See https://crbug.com/748250.
    size_t desc_idx = 0;
    for (size_t t = 0; t < moov_->extends.tracks.size(); t++) {
      const TrackExtends& trex = moov_->extends.tracks[t];
      if (trex.track_id == track->header.track_id) {
        desc_idx = trex.default_sample_description_index;
        break;
      }
    }
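    // Each track must have a matching 'trex' whose (one-based) default sample
    // description index is non-zero; otherwise the stream is rejected.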
    RCHECK(desc_idx > 0);
    desc_idx -= 1;  // BMFF descriptor index is one-based

    if (track->media.handler.type == kAudio) {
      detected_audio_track_count++;

      RCHECK(!samp_descr.audio_entries.empty());

      // It is not uncommon to find otherwise-valid files with incorrect sample
      // description indices, so we fail gracefully in that case.
      if (desc_idx >= samp_descr.audio_entries.size())
        desc_idx = 0;
      const AudioSampleEntry& entry = samp_descr.audio_entries[desc_idx];

      // For encrypted audio streams entry.format is FOURCC_ENCA and actual
      // format is in entry.sinf.format.format.
      FourCC audio_format = (entry.format == FOURCC_ENCA)
                                ? entry.sinf.format.format
                                : entry.format;

      if (audio_format != FOURCC_OPUS && audio_format != FOURCC_FLAC &&
#if BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
          audio_format != FOURCC_AC3 && audio_format != FOURCC_EAC3 &&
#endif
#if BUILDFLAG(ENABLE_PLATFORM_MPEG_H_AUDIO)
          audio_format != FOURCC_MHM1 && audio_format != FOURCC_MHA1 &&
#endif
          audio_format != FOURCC_MP4A) {
        MEDIA_LOG(ERROR, media_log_)
            << "Unsupported audio format 0x" << std::hex << entry.format
            << " in stsd box.";
        return false;
      }

      AudioCodec codec = kUnknownAudioCodec;
      AudioCodecProfile profile = AudioCodecProfile::kUnknown;
      ChannelLayout channel_layout = CHANNEL_LAYOUT_NONE;
      int sample_per_second = 0;
      int codec_delay_in_frames = 0;
      base::TimeDelta seek_preroll;
      std::vector<uint8_t> extra_data;
      if (audio_format == FOURCC_OPUS) {
        codec = kCodecOpus;
        channel_layout = GuessChannelLayout(entry.dops.channel_count);
        sample_per_second = entry.dops.sample_rate;
        codec_delay_in_frames = entry.dops.codec_delay_in_frames;
        seek_preroll = entry.dops.seek_preroll;
        extra_data = entry.dops.extradata;
      } else if (audio_format == FOURCC_FLAC) {
        // FLAC-in-ISOBMFF does not use object type indication. |audio_format|
        // is sufficient for identifying FLAC codec.
        if (!has_flac_) {
          MEDIA_LOG(ERROR, media_log_) << "FLAC audio stream detected in MP4, "
                                          "mismatching what is specified in "
                                          "the mimetype.";
          return false;
        }

        codec = kCodecFLAC;
        channel_layout = GuessChannelLayout(entry.channelcount);
        sample_per_second = entry.samplerate;
        extra_data = entry.dfla.stream_info;
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
#if BUILDFLAG(ENABLE_PLATFORM_MPEG_H_AUDIO)
      } else if (audio_format == FOURCC_MHM1 || audio_format == FOURCC_MHA1) {
        codec = kCodecMpegHAudio;
        channel_layout = CHANNEL_LAYOUT_BITSTREAM;
        sample_per_second = entry.samplerate;
        extra_data = entry.dfla.stream_info;
#endif
      } else {
        uint8_t audio_type = entry.esds.object_type;
#if BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
        if (audio_type == kForbidden) {
          if (audio_format == FOURCC_AC3)
            audio_type = kAC3;
          if (audio_format == FOURCC_EAC3)
            audio_type = kEAC3;
        }
#endif
        DVLOG(1) << "audio_type 0x" << std::hex << static_cast<int>(audio_type);
        if (audio_object_types_.find(audio_type) == audio_object_types_.end()) {
          MEDIA_LOG(ERROR, media_log_)
              << "audio object type 0x" << std::hex
              << static_cast<int>(audio_type)
              << " does not match what is specified in the mimetype.";
          return false;
        }

        // Check if it is MPEG-4 AAC defined in ISO 14496 Part 3 or a
        // supported MPEG-2 AAC variant.
        if (ESDescriptor::IsAAC(audio_type)) {
          const AAC& aac = entry.esds.aac;
          codec = kCodecAAC;
          profile = aac.GetProfile();
          channel_layout = aac.GetChannelLayout(has_sbr_);
          sample_per_second = aac.GetOutputSamplesPerSecond(has_sbr_);
#if defined(OS_ANDROID)
          extra_data = aac.codec_specific_data();
#endif
#if BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
        } else if (audio_type == kAC3) {
          codec = kCodecAC3;
          channel_layout = GuessChannelLayout(entry.channelcount);
          sample_per_second = entry.samplerate;
        } else if (audio_type == kEAC3) {
          codec = kCodecEAC3;
          channel_layout = GuessChannelLayout(entry.channelcount);
          sample_per_second = entry.samplerate;
#endif
        } else {
          MEDIA_LOG(ERROR, media_log_)
              << "Unsupported audio object type 0x" << std::hex
              << static_cast<int>(audio_type) << " in esds.";
          return false;
        }
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
      }

      SampleFormat sample_format;
      if (entry.samplesize == 8) {
        sample_format = kSampleFormatU8;
      } else if (entry.samplesize == 16) {
        sample_format = kSampleFormatS16;
      } else if (entry.samplesize == 24) {
        sample_format = kSampleFormatS24;
      } else if (entry.samplesize == 32) {
        sample_format = kSampleFormatS32;
      } else {
        LOG(ERROR) << "Unsupported sample size.";
        return false;
      }

      uint32_t audio_track_id = track->header.track_id;
      if (audio_track_ids_.find(audio_track_id) != audio_track_ids_.end()) {
        MEDIA_LOG(ERROR, media_log_)
            << "Audio track with track_id=" << audio_track_id
            << " already present.";
        return false;
      }
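      // If the track is encrypted, the protection scheme must be one we
      // support; GetEncryptionScheme() maps unsupported schemes to
      // kUnencrypted, which is treated as a parse error below.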
      bool is_track_encrypted = entry.sinf.info.track_encryption.is_encrypted;
      EncryptionScheme scheme = EncryptionScheme::kUnencrypted;
      if (is_track_encrypted) {
        scheme = GetEncryptionScheme(entry.sinf);
        if (scheme == EncryptionScheme::kUnencrypted)
          return false;
      }
      audio_config.Initialize(codec, sample_format, channel_layout,
                              sample_per_second, extra_data, scheme,
                              seek_preroll, codec_delay_in_frames);
      if (codec == kCodecAAC) {
        audio_config.disable_discard_decoder_delay();
        audio_config.set_profile(profile);
      }

      DVLOG(1) << "audio_track_id=" << audio_track_id
               << " config=" << audio_config.AsHumanReadableString();
      if (!audio_config.IsValidConfig()) {
        MEDIA_LOG(ERROR, media_log_) << "Invalid audio decoder config: "
                                     << audio_config.AsHumanReadableString();
        return false;
      }
      has_audio_ = true;
      audio_track_ids_.insert(audio_track_id);
      const char* track_kind = (audio_track_ids_.size() == 1 ? "main" : "");
      media_tracks->AddAudioTrack(
          audio_config, audio_track_id, MediaTrack::Kind(track_kind),
          MediaTrack::Label(track->media.handler.name),
          MediaTrack::Language(track->media.header.language()));
      continue;
    }

    if (track->media.handler.type == kVideo) {
      detected_video_track_count++;

      RCHECK(!samp_descr.video_entries.empty());
      if (desc_idx >= samp_descr.video_entries.size())
        desc_idx = 0;
      const VideoSampleEntry& entry = samp_descr.video_entries[desc_idx];

      if (!entry.IsFormatValid()) {
        MEDIA_LOG(ERROR, media_log_) << "Unsupported video format 0x"
                                     << std::hex << entry.format
                                     << " in stsd box.";
        return false;
      }

      // TODO(strobe): Recover correct crop box
      gfx::Size coded_size(entry.width, entry.height);
      gfx::Rect visible_rect(coded_size);

      // If PASP is available, use the coded size and PASP to calculate the
      // natural size. Otherwise, use the size in track header for natural size.
      gfx::Size natural_size(visible_rect.size());
      if (entry.pixel_aspect.h_spacing != 1 ||
          entry.pixel_aspect.v_spacing != 1) {
        natural_size =
            GetNaturalSize(visible_rect.size(), entry.pixel_aspect.h_spacing,
                           entry.pixel_aspect.v_spacing);
      } else if (track->header.width && track->header.height) {
        natural_size =
            gfx::Size(track->header.width, track->header.height);
      }

      uint32_t video_track_id = track->header.track_id;
      if (video_track_ids_.find(video_track_id) != video_track_ids_.end()) {
        MEDIA_LOG(ERROR, media_log_)
            << "Video track with track_id=" << video_track_id
            << " already present.";
        return false;
      }
      bool is_track_encrypted = entry.sinf.info.track_encryption.is_encrypted;
      EncryptionScheme scheme = EncryptionScheme::kUnencrypted;
      if (is_track_encrypted) {
        scheme = GetEncryptionScheme(entry.sinf);
        if (scheme == EncryptionScheme::kUnencrypted)
          return false;
      }
      video_config.Initialize(entry.video_codec, entry.video_codec_profile,
                              VideoDecoderConfig::AlphaMode::kIsOpaque,
                              VideoColorSpace::REC709(),
                              CalculateRotation(track->header, moov_->header),
                              coded_size, visible_rect, natural_size,
                              // No decoder-specific buffer needed for AVC;
                              // SPS/PPS are embedded in the video stream
                              EmptyExtraData(), scheme);
      video_config.set_level(entry.video_codec_level);

      if (entry.color_parameter_information) {
        video_config.set_color_space_info(
            ConvertColorParameterInformationToColorSpace(
                *entry.color_parameter_information));

        if (entry.mastering_display_color_volume ||
            entry.content_light_level_information) {
          HDRMetadata hdr_metadata;
          if (entry.mastering_display_color_volume) {
            hdr_metadata.mastering_metadata = ConvertMdcvToMasteringMetadata(
                *entry.mastering_display_color_volume);
          }

          if (entry.content_light_level_information) {
            hdr_metadata.max_content_light_level =
                entry.content_light_level_information->max_content_light_level;
            hdr_metadata.max_frame_average_light_level =
                entry.content_light_level_information
                    ->max_pic_average_light_level;
          }
          video_config.set_hdr_metadata(hdr_metadata);
        }
      }

      DVLOG(1) << "video_track_id=" << video_track_id
               << " config=" << video_config.AsHumanReadableString();
      if (!video_config.IsValidConfig()) {
        MEDIA_LOG(ERROR, media_log_) << "Invalid video decoder config: "
                                     << video_config.AsHumanReadableString();
        return false;
      }
      has_video_ = true;
      video_track_ids_.insert(video_track_id);
      auto track_kind =
          MediaTrack::Kind(video_track_ids_.size() == 1 ? "main" : "");
      media_tracks->AddVideoTrack(
          video_config, video_track_id, track_kind,
          MediaTrack::Label(track->media.handler.name),
          MediaTrack::Language(track->media.header.language()));
      continue;
    }

    // TODO(wolenetz): Investigate support in MSE and Chrome MSE for CEA 608/708
    // embedded caption data in video track. At time of init segment parsing, we
    // don't have this data (unless maybe by SourceBuffer's mimetype).
    // See https://crbug.com/597073
    if (track->media.handler.type == kText)
      detected_text_track_count++;
  }

  if (!moov_->pssh.empty())
    OnEncryptedMediaInitData(moov_->pssh);

  RCHECK(config_cb_.Run(std::move(media_tracks), TextTrackConfigMap()));

  StreamParser::InitParameters params(kInfiniteDuration);
  if (moov_->extends.header.fragment_duration > 0) {
    params.duration = TimeDeltaFromRational(
        moov_->extends.header.fragment_duration, moov_->header.timescale);
    if (params.duration == kNoTimestamp) {
      MEDIA_LOG(ERROR, media_log_) << "Fragment duration exceeds representable "
                                   << "limit";
      return false;
    }
    params.liveness = DemuxerStream::LIVENESS_RECORDED;
  } else if (moov_->header.duration > 0 &&
             ((moov_->header.version == 0 &&
               moov_->header.duration !=
                   std::numeric_limits<uint32_t>::max()) ||
              (moov_->header.version == 1 &&
               moov_->header.duration !=
                   std::numeric_limits<uint64_t>::max()))) {
    // In ISO/IEC 14496-12:2012, 8.2.2.3: "If the duration cannot be determined
    // then duration is set to all 1s."
    // The duration field is either 32-bit or 64-bit depending on the version in
    // MovieHeaderBox. We interpret not 0 and not all 1's here as "known
    // duration".
    params.duration =
        TimeDeltaFromRational(moov_->header.duration, moov_->header.timescale);
    if (params.duration == kNoTimestamp) {
      MEDIA_LOG(ERROR, media_log_) << "Movie duration exceeds representable "
                                   << "limit";
      return false;
    }
    params.liveness = DemuxerStream::LIVENESS_RECORDED;
  } else {
    // In ISO/IEC 14496-12:2005(E), 8.30.2: ".. If an MP4 file is created in
    // real-time, such as used in live streaming, it is not likely that the
    // fragment_duration is known in advance and this (mehd) box may be
    // omitted."

    // We have an unknown duration (neither any mvex fragment_duration nor moov
    // duration value indicated a known duration, above.)

    // TODO(wolenetz): Investigate gating liveness detection on timeline_offset
    // when it's populated. See http://crbug.com/312699
    params.liveness = DemuxerStream::LIVENESS_LIVE;
  }

  DVLOG(1) << "liveness: " << params.liveness;

  if (init_cb_) {
    params.detected_audio_track_count = detected_audio_track_count;
    params.detected_video_track_count = detected_video_track_count;
    params.detected_text_track_count = detected_text_track_count;
    std::move(init_cb_).Run(params);
  }

  return true;
}

bool MP4StreamParser::ParseMoof(BoxReader* reader) {
  RCHECK(moov_.get());  // Must already have initialization segment
  MovieFragment moof;
  RCHECK(moof.Parse(reader));
  if (!runs_)
    runs_.reset(new TrackRunIterator(moov_.get(), media_log_));
  RCHECK(runs_->Init(moof));
  RCHECK(ComputeHighestEndOffset(moof));

  if (!moof.pssh.empty())
    OnEncryptedMediaInitData(moof.pssh);

  new_segment_cb_.Run();
  ChangeState(kWaitingForSampleData);
  return true;
}

void MP4StreamParser::OnEncryptedMediaInitData(
    const std::vector<ProtectionSystemSpecificHeader>& headers) {
  // TODO(strobe): ensure that the value of init_data (all PSSH headers
  // concatenated in arbitrary order) matches the EME spec.
  // See https://www.w3.org/Bugs/Public/show_bug.cgi?id=17673.
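  // Concatenate the raw 'pssh' boxes into a single init data blob.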
  size_t total_size = 0;
  for (size_t i = 0; i < headers.size(); i++)
    total_size += headers[i].raw_box.size();

  std::vector<uint8_t> init_data(total_size);
  size_t pos = 0;
  for (size_t i = 0; i < headers.size(); i++) {
    memcpy(&init_data[pos], &headers[i].raw_box[0],
           headers[i].raw_box.size());
    pos += headers[i].raw_box.size();
  }
  encrypted_media_init_data_cb_.Run(EmeInitDataType::CENC, init_data);
}

#if BUILDFLAG(USE_PROPRIETARY_CODECS)
bool MP4StreamParser::PrepareAACBuffer(
    const AAC& aac_config,
    std::vector<uint8_t>* frame_buf,
    std::vector<SubsampleEntry>* subsamples) const {
  // Append an ADTS header to every audio sample.
  RCHECK(aac_config.ConvertEsdsToADTS(frame_buf));

  // Adjust the subsample information to account for the added ADTS header. AAC
  // is not required to use subsample encryption, so we may need to add an
  // entry.
  if (subsamples->empty()) {
    subsamples->push_back(SubsampleEntry(
        kADTSHeaderMinSize, frame_buf->size() - kADTSHeaderMinSize));
  } else {
    (*subsamples)[0].clear_bytes += kADTSHeaderMinSize;
  }
  return true;
}
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)

ParseResult MP4StreamParser::EnqueueSample(BufferQueueMap* buffers) {
  DCHECK_EQ(state_, kEmittingSamples);

  if (!runs_->IsRunValid()) {
    // Flush any buffers we've gotten in this chunk so that buffers don't
    // cross |new_segment_cb_| calls
    if (!SendAndFlushSamples(buffers))
      return ParseResult::kError;

    // Remain in kEmittingSamples state, discarding data, until the end of
    // the current 'mdat' box has been appended to the queue.
    // TODO(sandersd): As I understand it, this Trim() will always succeed,
    // since |mdat_tail_| is never outside of the queue. It's also plausible
    // that this Trim() is always a no-op, but perhaps if all runs are empty
    // this still does something?
    if (!queue_.Trim(mdat_tail_))
      return ParseResult::kNeedMoreData;

    ChangeState(kParsingBoxes);
    end_of_segment_cb_.Run();
    return ParseResult::kOk;
  }

  if (!runs_->IsSampleValid()) {
    if (!runs_->AdvanceRun())
      return ParseResult::kError;
    return ParseResult::kOk;
  }

  const uint8_t* buf;
  int buf_size;
  queue_.Peek(&buf, &buf_size);
  if (!buf_size)
    return ParseResult::kNeedMoreData;

  bool audio =
      audio_track_ids_.find(runs_->track_id()) != audio_track_ids_.end();
  bool video =
      video_track_ids_.find(runs_->track_id()) != video_track_ids_.end();

  // Skip this entire track if it's not one we're interested in
  if (!audio && !video) {
    if (!runs_->AdvanceRun())
      return ParseResult::kError;
    return ParseResult::kOk;
  }

  // Attempt to cache the auxiliary information first. Aux info is usually
  // placed in a contiguous block before the sample data, rather than being
  // interleaved. If we didn't cache it, this would require that we retain the
  // start of the segment buffer while reading samples. Aux info is typically
  // quite small compared to sample data, so this pattern is useful on
  // memory-constrained devices where the source buffer consumes a substantial
  // portion of the total system memory.
  if (runs_->AuxInfoNeedsToBeCached()) {
    queue_.PeekAt(runs_->aux_info_offset() + moof_head_, &buf, &buf_size);
    if (buf_size < runs_->aux_info_size())
      return ParseResult::kNeedMoreData;
    if (!runs_->CacheAuxInfo(buf, buf_size))
      return ParseResult::kError;
    return ParseResult::kOk;
  }

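  // Sample offsets reported by |runs_| are relative to the head of the current
  // 'moof'; rebase onto the absolute byte-stream position via |moof_head_|.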
  queue_.PeekAt(runs_->sample_offset() + moof_head_, &buf, &buf_size);

  if (runs_->sample_size() >
      static_cast<uint32_t>(std::numeric_limits<int>::max())) {
    MEDIA_LOG(ERROR, media_log_) << "Sample size is too large";
    return ParseResult::kError;
  }

  int sample_size = base::checked_cast<int>(runs_->sample_size());

  if (buf_size < sample_size)
    return ParseResult::kNeedMoreData;

  if (sample_size == 0) {
    // Generally not expected, but spec allows it. Code below this block assumes
    // the current sample is not empty.
    LIMITED_MEDIA_LOG(DEBUG, media_log_, num_empty_samples_skipped_,
                      kMaxEmptySampleLogs)
        << "Skipping 'trun' sample with size of 0.";
    if (!runs_->AdvanceSample())
      return ParseResult::kError;
    return ParseResult::kOk;
  }

  std::unique_ptr<DecryptConfig> decrypt_config;
  std::vector<SubsampleEntry> subsamples;
  if (runs_->is_encrypted()) {
    decrypt_config = runs_->GetDecryptConfig();
    if (!decrypt_config)
      return ParseResult::kError;
    subsamples = decrypt_config->subsamples();
  }

  // This may change if analysis results indicate runs_->is_keyframe() is
  // opposite of what the coded frame contains.
  bool is_keyframe = runs_->is_keyframe();

  std::vector<uint8_t> frame_buf(buf, buf + sample_size);
  if (video) {
    if (runs_->video_description().video_codec == kCodecH264 ||
        runs_->video_description().video_codec == kCodecHEVC ||
        runs_->video_description().video_codec == kCodecDolbyVision) {
      DCHECK(runs_->video_description().frame_bitstream_converter);
      BitstreamConverter::AnalysisResult analysis;
      if (!runs_->video_description()
               .frame_bitstream_converter->ConvertAndAnalyzeFrame(
                   &frame_buf, is_keyframe, &subsamples, &analysis)) {
        MEDIA_LOG(ERROR, media_log_)
            << "Failed to prepare video sample for decode";
        return ParseResult::kError;
      }

      // If conformance analysis was not actually performed, assume the frame is
      // conformant.  If it was performed and found to be non-conformant, log
      // it.
      if (!analysis.is_conformant.value_or(true)) {
        LIMITED_MEDIA_LOG(DEBUG, media_log_, num_invalid_conversions_,
                          kMaxInvalidConversionLogs)
            << "Prepared video sample is not conformant";
      }

      // Use |analysis.is_keyframe|, if it was actually determined, for logging
      // if the analysis mismatches the container's keyframe metadata for
      // |frame_buf|.
      if (analysis.is_keyframe.has_value() &&
          is_keyframe != analysis.is_keyframe.value()) {
        LIMITED_MEDIA_LOG(DEBUG, media_log_, num_video_keyframe_mismatches_,
                          kMaxVideoKeyframeMismatchLogs)
            << "ISO-BMFF container metadata for video frame indicates that the "
               "frame is "
            << (is_keyframe ? "" : "not ")
            << "a keyframe, but the video frame contents indicate the "
               "opposite.";
        // As of September 2018, it appears that all of Edge, Firefox, Safari
        // work with content that marks non-avc-keyframes as a keyframe in the
        // container. Encoders/muxers/old streams still exist that produce
        // all-keyframe mp4 video tracks, though many of the coded frames are
        // not keyframes (likely workaround due to the impact on low-latency
        // live streams until https://crbug.com/229412 was fixed).  We'll trust
        // the AVC frame's keyframe-ness over the mp4 container's metadata if
        // they mismatch. If other out-of-order codecs in mp4 (e.g. HEVC, DV)
        // implement keyframe analysis in their frame_bitstream_converter, we'll
        // similarly trust that analysis instead of the mp4.
        is_keyframe = analysis.is_keyframe.value();
      }
    }
  }

  if (audio) {
    if (ESDescriptor::IsAAC(runs_->audio_description().esds.object_type)) {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
      if (!PrepareAACBuffer(runs_->audio_description().esds.aac, &frame_buf,
                            &subsamples)) {
        MEDIA_LOG(ERROR, media_log_)
            << "Failed to prepare AAC sample for decode";
        return ParseResult::kError;
      }
#else
      return ParseResult::kError;
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
    }
  }

  if (decrypt_config) {
    if (!subsamples.empty()) {
      // Create a new config with the updated subsamples.
      decrypt_config.reset(
          new DecryptConfig(decrypt_config->encryption_scheme(),
                            decrypt_config->key_id(), decrypt_config->iv(),
                            subsamples, decrypt_config->encryption_pattern()));
    }
    // else, use the existing config.
  }

  StreamParserBuffer::Type buffer_type = audio ? DemuxerStream::AUDIO :
      DemuxerStream::VIDEO;

  scoped_refptr<StreamParserBuffer> stream_buf =
      StreamParserBuffer::CopyFrom(&frame_buf[0], frame_buf.size(), is_keyframe,
                                   buffer_type, runs_->track_id());

  if (decrypt_config)
    stream_buf->set_decrypt_config(std::move(decrypt_config));

  if (runs_->duration() != kNoTimestamp) {
    stream_buf->set_duration(runs_->duration());
  } else {
    MEDIA_LOG(ERROR, media_log_) << "Frame duration exceeds representable "
                                 << "limit";
    return ParseResult::kError;
  }

  if (runs_->cts() != kNoTimestamp) {
    stream_buf->set_timestamp(runs_->cts());
  } else {
    MEDIA_LOG(ERROR, media_log_) << "Frame PTS exceeds representable limit";
    return ParseResult::kError;
  }

  if (runs_->dts() != kNoDecodeTimestamp()) {
    stream_buf->SetDecodeTimestamp(runs_->dts());
  } else {
    MEDIA_LOG(ERROR, media_log_) << "Frame DTS exceeds representable limit";
    return ParseResult::kError;
  }

  DVLOG(3) << "Emit " << (audio ? "audio" : "video") << " frame: "
           << " track_id=" << runs_->track_id() << ", key=" << is_keyframe
           << ", dur=" << runs_->duration().InMilliseconds()
           << ", dts=" << runs_->dts().InMilliseconds()
           << ", cts=" << runs_->cts().InMilliseconds()
           << ", size=" << sample_size;

  (*buffers)[runs_->track_id()].push_back(stream_buf);
  if (!runs_->AdvanceSample())
    return ParseResult::kError;
  return ParseResult::kOk;
}

bool MP4StreamParser::SendAndFlushSamples(BufferQueueMap* buffers) {
  if (buffers->empty())
    return true;
  bool success = new_buffers_cb_.Run(*buffers);
  buffers->clear();
  return success;
}

bool MP4StreamParser::ReadAndDiscardMDATsUntil(int64_t max_clear_offset) {
  ParseResult result = ParseResult::kOk;
  int64_t upper_bound = std::min(max_clear_offset, queue_.tail());
  while (mdat_tail_ < upper_bound) {
    const uint8_t* buf = NULL;
    int size = 0;
    queue_.PeekAt(mdat_tail_, &buf, &size);

    FourCC type;
    size_t box_sz;
    result = BoxReader::StartTopLevelBox(buf, size, media_log_, &type, &box_sz);
    if (result != ParseResult::kOk)
      break;

    if (type != FOURCC_MDAT) {
      MEDIA_LOG(DEBUG, media_log_)
          << "Unexpected box type while parsing MDATs: "
          << FourCCToString(type);
    }
    // TODO(chcunningham): Fix mdat_tail_ and ByteQueue classes to use size_t.
    // TODO(sandersd): The whole |mdat_tail_| mechanism appears to be pointless
    // because StartTopLevelBox() only succeeds for complete boxes. Either
    // remove |mdat_tail_| throughout this class or implement the ability to
    // discard partial mdats.
    mdat_tail_ += base::checked_cast<int64_t>(box_sz);
  }
  queue_.Trim(std::min(mdat_tail_, upper_bound));
  return result != ParseResult::kError;
}

void MP4StreamParser::ChangeState(State new_state) {
  DVLOG(2) << "Changing state: " << new_state;
  state_ = new_state;
}

bool MP4StreamParser::HaveEnoughDataToEnqueueSamples() {
  DCHECK_EQ(state_, kWaitingForSampleData);
  // For muxed content, make sure we have data up to |highest_end_offset_|
  // so we can ensure proper enqueuing behavior. Otherwise assume we have enough
  // data and allow per sample offset checks to meter sample enqueuing.
  // TODO(acolwell): Fix trun box handling so we don't have to special case
  // muxed content.
  return !(has_audio_ && has_video_ &&
           queue_.tail() < highest_end_offset_ + moof_head_);
}

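// Walks the fragment with a temporary TrackRunIterator to find the furthest
// byte offset (sample data or aux info) referenced by this 'moof'.
// HaveEnoughDataToEnqueueSamples() uses the result to gate enqueuing for muxed
// content.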
bool MP4StreamParser::ComputeHighestEndOffset(const MovieFragment& moof) {
  highest_end_offset_ = 0;

  TrackRunIterator runs(moov_.get(), media_log_);
  RCHECK(runs.Init(moof));

  while (runs.IsRunValid()) {
    int64_t aux_info_end_offset = runs.aux_info_offset() + runs.aux_info_size();
    if (aux_info_end_offset > highest_end_offset_)
      highest_end_offset_ = aux_info_end_offset;

    while (runs.IsSampleValid()) {
      int64_t sample_end_offset = runs.sample_offset() + runs.sample_size();
      if (sample_end_offset > highest_end_offset_)
        highest_end_offset_ = sample_end_offset;
      if (!runs.AdvanceSample())
        return false;
    }
    if (!runs.AdvanceRun())
      return false;
  }

  return true;
}

}  // namespace mp4
}  // namespace media