1import collections 2from functools import reduce 3 4__author__ = 'Gilles Boccon-Gibod (bok@bok.net)' 5__copyright__ = 'Copyright 2011-2020 Axiomatic Systems, LLC.' 6 7import sys 8import os 9import os.path as path 10from subprocess import check_output, CalledProcessError 11import json 12import io 13import struct 14import operator 15import hashlib 16import fractions 17import xml.sax.saxutils as saxutils 18import base64 19 20LanguageCodeMap = { 21 'aar': 'aa', 'abk': 'ab', 'afr': 'af', 'aka': 'ak', 'alb': 'sq', 'amh': 'am', 'ara': 'ar', 'arg': 'an', 22 'arm': 'hy', 'asm': 'as', 'ava': 'av', 'ave': 'ae', 'aym': 'ay', 'aze': 'az', 'bak': 'ba', 'bam': 'bm', 23 'baq': 'eu', 'bel': 'be', 'ben': 'bn', 'bih': 'bh', 'bis': 'bi', 'bod': 'bo', 'bos': 'bs', 'bre': 'br', 24 'bul': 'bg', 'bur': 'my', 'cat': 'ca', 'ces': 'cs', 'cha': 'ch', 'che': 'ce', 'chi': 'zh', 'chu': 'cu', 25 'chv': 'cv', 'cor': 'kw', 'cos': 'co', 'cre': 'cr', 'cym': 'cy', 'cze': 'cs', 'dan': 'da', 'deu': 'de', 26 'div': 'dv', 'dut': 'nl', 'dzo': 'dz', 'ell': 'el', 'eng': 'en', 'epo': 'eo', 'est': 'et', 'eus': 'eu', 27 'ewe': 'ee', 'fao': 'fo', 'fas': 'fa', 'fij': 'fj', 'fin': 'fi', 'fra': 'fr', 'fre': 'fr', 'fry': 'fy', 28 'ful': 'ff', 'geo': 'ka', 'ger': 'de', 'gla': 'gd', 'gle': 'ga', 'glg': 'gl', 'glv': 'gv', 'gre': 'el', 29 'grn': 'gn', 'guj': 'gu', 'hat': 'ht', 'hau': 'ha', 'heb': 'he', 'her': 'hz', 'hin': 'hi', 'hmo': 'ho', 30 'hrv': 'hr', 'hun': 'hu', 'hye': 'hy', 'ibo': 'ig', 'ice': 'is', 'ido': 'io', 'iii': 'ii', 'iku': 'iu', 31 'ile': 'ie', 'ina': 'ia', 'ind': 'id', 'ipk': 'ik', 'isl': 'is', 'ita': 'it', 'jav': 'jv', 'jpn': 'ja', 32 'kal': 'kl', 'kan': 'kn', 'kas': 'ks', 'kat': 'ka', 'kau': 'kr', 'kaz': 'kk', 'khm': 'km', 'kik': 'ki', 33 'kin': 'rw', 'kir': 'ky', 'kom': 'kv', 'kon': 'kg', 'kor': 'ko', 'kua': 'kj', 'kur': 'ku', 'lao': 'lo', 34 'lat': 'la', 'lav': 'lv', 'lim': 'li', 'lin': 'ln', 'lit': 'lt', 'ltz': 'lb', 'lub': 'lu', 'lug': 'lg', 35 'mac': 'mk', 'mah': 'mh', 'mal': 'ml', 'mao': 'mi', 'mar': 'mr', 'may': 'ms', 'mkd': 'mk', 'mlg': 'mg', 36 'mlt': 'mt', 'mon': 'mn', 'mri': 'mi', 'msa': 'ms', 'mya': 'my', 'nau': 'na', 'nav': 'nv', 'nbl': 'nr', 37 'nde': 'nd', 'ndo': 'ng', 'nep': 'ne', 'nld': 'nl', 'nno': 'nn', 'nob': 'nb', 'nor': 'no', 'nya': 'ny', 38 'oci': 'oc', 'oji': 'oj', 'ori': 'or', 'orm': 'om', 'oss': 'os', 'pan': 'pa', 'per': 'fa', 'pli': 'pi', 39 'pol': 'pl', 'por': 'pt', 'pus': 'ps', 'que': 'qu', 'roh': 'rm', 'ron': 'ro', 'rum': 'ro', 'run': 'rn', 40 'rus': 'ru', 'sag': 'sg', 'san': 'sa', 'sin': 'si', 'slk': 'sk', 'slo': 'sk', 'slv': 'sl', 'sme': 'se', 41 'smo': 'sm', 'sna': 'sn', 'snd': 'sd', 'som': 'so', 'sot': 'st', 'spa': 'es', 'sqi': 'sq', 'srd': 'sc', 42 'srp': 'sr', 'ssw': 'ss', 'sun': 'su', 'swa': 'sw', 'swe': 'sv', 'tah': 'ty', 'tam': 'ta', 'tat': 'tt', 43 'tel': 'te', 'tgk': 'tg', 'tgl': 'tl', 'tha': 'th', 'tib': 'bo', 'tir': 'ti', 'ton': 'to', 'tsn': 'tn', 44 'tso': 'ts', 'tuk': 'tk', 'tur': 'tr', 'twi': 'tw', 'uig': 'ug', 'ukr': 'uk', 'urd': 'ur', 'uzb': 'uz', 45 'ven': 've', 'vie': 'vi', 'vol': 'vo', 'wel': 'cy', 'wln': 'wa', 'wol': 'wo', 'xho': 'xh', 'yid': 'yi', 46 'yor': 'yo', 'zha': 'za', 'zho': 'zh', 'zul': 'zu', '```': 'und' 47} 48 49LanguageNames = { 50 "aa": "Qafara", 51 "ab": "Аҧсуа", 52 "ae": "Avesta", 53 "af": "Afrikaans", 54 "ak": "Akana", 55 "am": "አማርኛ", 56 "an": "Aragonés", 57 "ar": "العربية", 58 "as": "অসমীয়া", 59 "av": "авар мацӀ; магӀарул мацӀ", 60 "ay": "Aymar Aru", 61 "az": "Azərbaycanca", 62 "ba": "башҡорт теле", 63 "be": "Беларуская мова", 64 "bg": "български език", 65 "bh": "भोजपुरी", 66 "bi": "Bislama", 67 "bm": "Bamanankan", 68 "bn": "বাংলা", 69 "bo": "བོད་ཡིག", 70 "br": "Brezhoneg", 71 "bs": "Bosanski", 72 "ca": "Català", 73 "ce": "нохчийн мотт", 74 "ch": "Chamoru", 75 "co": "Corsu", 76 "cr": "ᓀᐦᐃᔭᐍᐏᐣ", 77 "cs": "čeština", 78 "cu": "ѩзыкъ словѣньскъ", 79 "cv": "чӑваш чӗлхи", 80 "cy": "Cymraeg", 81 "da": "Dansk", 82 "de": "Deutsch", 83 "dz": "རྫོང་ཁ", 84 "ee": "Ɛʋɛgbɛ", 85 "el": "Ελληνικά", 86 "en": "English", 87 "eo": "Esperanto", 88 "es": "Español", 89 "et": "Eesti Keel", 90 "eu": "Euskara", 91 "fa": "فارسی", 92 "ff": "Fulfulde", 93 "fi": "Suomi", 94 "fj": "Vosa Vakaviti", 95 "fo": "Føroyskt", 96 "fr": "Français", 97 "fy": "Frysk", 98 "ga": "Gaeilge", 99 "gd": "Gàidhlig", 100 "gl": "Galego", 101 "gn": "Avañe'ẽ", 102 "gu": "ગુજરાતી", 103 "gv": "Gaelg; Manninagh", 104 "ha": "Hausancī; هَوُسَ", 105 "he": "עִבְרִית; עברית", 106 "hi": "हिन्दी", 107 "ho": "Hiri Motu", 108 "hr": "Hrvatski", 109 "ht": "Kreyòl ayisyen", 110 "hu": "Magyar", 111 "hy": "Հայերեն լեզու", 112 "hz": "Otjiherero", 113 "ia": "Interlingua", 114 "id": "Bahasa Indonesia", 115 "ie": "Interlingue", 116 "ig": "Igbo", 117 "ii": "ꆇꉙ", 118 "ik": "Iñupiaq; Iñupiatun", 119 "io": "Ido", 120 "is": "íslenska", 121 "it": "Italiano", 122 "iu": "ᐃᓄᒃᑎᑐᑦ", 123 "ja": "日本語", 124 "ka": "ქართული ენა (kartuli ena)", 125 "kg": "Kikongo", 126 "ki": "Gĩkũyũ", 127 "kj": "Kuanyama", 128 "kk": "Қазақ тілі", 129 "kl": "Kalaallisut", 130 "km": "ភាសាខ្មែរ", 131 "kn": "ಕನ್ನಡ", 132 "ko": "한국어 (韓國語); 조선말 (朝鮮語)", 133 "kr": "Kanuri", 134 "ks": "कॉशुर; کٲشُر", 135 "ku": "Kurdî; كوردي", 136 "kv": "коми кыв", 137 "kw": "Kernewek", 138 "ky": "кыргыз тили", 139 "la": "latine; lingua Latina", 140 "lb": "Lëtzebuergesch", 141 "lg": "Luganda", 142 "li": "Limburgs", 143 "ln": "Lingala", 144 "lo": "ພາສາລາວ", 145 "lt": "Lietuvių Kalba", 146 "lv": "Latviešu Valoda", 147 "mg": "Malagasy fiteny", 148 "mh": "Kajin M̧ajeļ", 149 "mi": "Te Reo Māori", 150 "mk": "македонски јазик", 151 "ml": "മലയാളം", 152 "mn": "монгол хэл", 153 "mr": "मराठी", 154 "ms": "Bahasa Melayu; بهاس ملايو", 155 "mt": "Malti", 156 "my": "မြန်မာစာ", 157 "na": "Ekakairũ Naoero", 158 "nb": "Bokmål", 159 "nd": "isiNdebele", 160 "ne": "नेपाली", 161 "ng": "Owambo", 162 "nl": "Nederlands", 163 "nn": "Nynorsk", 164 "no": "Norsk", 165 "nr": "isiNdebele", 166 "nv": "Diné bizaad; Dinékʼehǰí", 167 "ny": "chiCheŵa; chinyanja", 168 "oc": "Occitan", 169 "oj": "ᐊᓂᔑᓇᐯᒧᐏᐣ (Anishinaabemowin)", 170 "om": "Afaan Oromoo", 171 "or": "ଓଡ଼ିଆ", 172 "os": "ирон ӕвзаг", 173 "pa": "ਪੰਜਾਬੀ; پنجابی", 174 "pi": "पालि", 175 "pl": "Polski", 176 "ps": "پښتو", 177 "pt": "Português", 178 "qu": "Runa Simi; Kichwa", 179 "rm": "Rumantsch Grischun", 180 "rn": "Rundi", 181 "ro": "Română", 182 "ru": "Русский", 183 "rw": "Ikinyarwanda", 184 "sa": "संस्कृतम्", 185 "sc": "Sardu", 186 "sd": "سنڌي، سندھی; सिन्धी", 187 "se": "sámi; sámegiella", 188 "sg": "yângâ tî sängö", 189 "si": "සිංහල", 190 "sk": "Slovenčina", 191 "sl": "Slovenščina", 192 "sm": "Gagana fa'a Samoa", 193 "sn": "chiShona", 194 "so": "Soomaaliga; af Soomaali", 195 "sq": "Shqip", 196 "sr": "српски језик; srpski jezik", 197 "ss": "siSwati", 198 "st": "Sesotho", 199 "su": "Basa Sunda", 200 "sv": "Svenska", 201 "sw": "Kiswahili", 202 "ta": "தமிழ்", 203 "te": "తెలుగు", 204 "tg": "тоҷикӣ; تاجیکی", 205 "th": "ภาษาไทย", 206 "ti": "ትግርኛ", 207 "tk": "Түркмен", 208 "tl": "Wikang Tagalog; ᜏᜒᜃᜅ᜔ ᜆᜄᜎᜓᜄ᜔", 209 "tn": "Setswana", 210 "to": "Faka-Tonga", 211 "tr": "Türkçe", 212 "ts": "Xitsonga", 213 "tt": "татарча; tatarça; تاتارچا", 214 "tw": "Twi", 215 "ty": "te reo Tahiti; te reo Māʼohi", 216 "ug": "Uyƣurqə; Uyğurçe; ئۇيغۇرچ", 217 "uk": "українська мова", 218 "ur": "اردو", 219 "uz": "O'zbek; Ўзбек; أۇزبېك", 220 "ve": "Tshivenḓa", 221 "vi": "Tiếng Việt", 222 "vo": "Volapük", 223 "wa": "Walon", 224 "wo": "Wolof", 225 "xh": "isiXhosa", 226 "yi": "ייִדיש", 227 "yo": "Yorùbá", 228 "za": "Saɯ cueŋƅ; Saw cuengh", 229 "zh": "漢語; 汉语; 中文", 230 "zu": "isiZulu", 231 "und": "Unknown" 232} 233 234def PrintErrorAndExit(message): 235 sys.stderr.write(message+'\n') 236 sys.exit(1) 237 238def XmlDuration(d): 239 h = int(d) // 3600 240 d -= h*3600 241 m = int(d) // 60 242 s = d-m*60 243 xsd = 'PT' 244 if h: 245 xsd += str(h)+'H' 246 if h or m: 247 xsd += str(m)+'M' 248 if s: 249 xsd += ('%.3fS' % (s)) 250 return xsd 251 252def BooleanFromString(string): 253 if string is None: 254 return False 255 return string.lower() in ['yes', 'true', 'on', '1'] 256 257def Base64Encode(x): 258 return base64.b64encode(x).decode('ascii') 259 260def Base64Decode(x): 261 return base64.b64decode(x) 262 263def Bento4Command(options, name, *args, **kwargs): 264 executable = path.join(options.exec_dir, name) if options.exec_dir != '-' else name 265 cmd = [executable] 266 for kwarg in kwargs: 267 arg = kwarg.replace('_', '-') 268 if isinstance(kwargs[kwarg], bool): 269 cmd.append('--'+arg) 270 else : 271 if isinstance(kwargs[kwarg], list): 272 for element in kwargs[kwarg]: 273 cmd.append('--'+arg) 274 cmd.append(element) 275 else: 276 cmd.append('--'+arg) 277 cmd.append(kwargs[kwarg]) 278 279 cmd += args 280 if options.debug: 281 print('COMMAND: ', " ".join(cmd), cmd) 282 try: 283 try: 284 return check_output(cmd) 285 except OSError as e: 286 if options.debug: 287 print('executable ' + executable + ' not found in exec_dir, trying with PATH') 288 cmd[0] = path.basename(cmd[0]) 289 return check_output(cmd) 290 except CalledProcessError as e: 291 message = "binary tool failed with error %d" % e.returncode 292 if options.verbose: 293 message += " - " + str(cmd) 294 raise Exception(message) 295 except OSError as e: 296 raise Exception('executable "'+name+'" not found, ensure that it is in your path or in the directory '+options.exec_dir) 297 298 299def Mp4Info(options, filename, *args, **kwargs): 300 return Bento4Command(options, 'mp4info', filename, *args, **kwargs) 301 302def Mp4Dump(options, filename, *args, **kwargs): 303 return Bento4Command(options, 'mp4dump', filename, *args, **kwargs) 304 305def Mp4Split(options, filename, *args, **kwargs): 306 return Bento4Command(options, 'mp4split', filename, *args, **kwargs) 307 308def Mp4Fragment(options, input_filename, output_filename, *args, **kwargs): 309 return Bento4Command(options, 'mp4fragment', input_filename, output_filename, *args, **kwargs) 310 311def Mp4Encrypt(options, input_filename, output_filename, *args, **kwargs): 312 return Bento4Command(options, 'mp4encrypt', input_filename, output_filename, *args, **kwargs) 313 314def Mp42Hls(options, input_filename, *args, **kwargs): 315 return Bento4Command(options, 'mp42hls', input_filename, *args, **kwargs) 316 317def Mp4IframeIndex(options, input_filename, *args, **kwargs): 318 return Bento4Command(options, 'mp4iframeindex', input_filename, *args, **kwargs) 319 320class Mp4Atom: 321 def __init__(self, type, size, position): 322 self.type = type 323 self.size = size 324 self.position = position 325 326 def __str__(self): 327 return 'ATOM: ' + self.type + ',' + str(self.size) + '@' + str(self.position) 328 329 330def WalkAtoms(filename, until=None): 331 cursor = 0 332 atoms = [] 333 file = io.FileIO(filename, "rb") 334 while True: 335 try: 336 size = struct.unpack('>I', file.read(4))[0] 337 type = file.read(4).decode('ascii') 338 if type == until: 339 break 340 if size == 1: 341 size = struct.unpack('>Q', file.read(8))[0] 342 atoms.append(Mp4Atom(type, size, cursor)) 343 cursor += size 344 file.seek(cursor) 345 except Exception: 346 break 347 348 return atoms 349 350 351def FilterChildren(parent, type): 352 if isinstance(parent, list): 353 children = parent 354 else: 355 children = parent['children'] 356 return [child for child in children if child['name'] == type] 357 358def FindChild(top, path): 359 for entry in path: 360 children = FilterChildren(top, entry) 361 if not children: return None 362 top = children[0] 363 return top 364 365class Mp4Track: 366 def __init__(self, parent, info): 367 self.parent = parent 368 self.info = info 369 self.default_sample_duration = 0 370 self.timescale = 0 371 self.moofs = [] 372 self.sample_counts = [] 373 self.segment_sizes = [] 374 self.segment_durations = [] 375 self.segment_scaled_durations = [] 376 self.segment_bitrates = [] 377 self.total_sample_count = 0 378 self.total_duration = 0 379 self.total_scaled_duration = 0 380 self.media_size = 0 381 self.average_segment_duration = 0 382 self.average_segment_bitrate = 0 383 self.max_segment_bitrate = 0 384 self.bandwidth = 0 385 self.language = '' 386 self.language_name = '' 387 self.order_index = 0 388 self.key_info = {} 389 self.id = info['id'] 390 if info['type'] == 'Audio': 391 self.type = 'audio' 392 elif info['type'] == 'Video': 393 self.type = 'video' 394 elif info['type'] == 'Subtitles': 395 self.type = 'subtitles' 396 else: 397 self.type = 'other' 398 399 sample_desc = info['sample_descriptions'][0] 400 401 self.codec_family = sample_desc['coding'] 402 if 'codecs_string' in sample_desc: 403 self.codec = sample_desc['codecs_string'] 404 else: 405 self.codec = self.codec_family 406 407 if self.type == 'video': 408 # set the scan type (hardcoded for now) 409 self.scan_type = 'progressive' 410 411 # set the width and height 412 self.width = sample_desc['width'] 413 self.height = sample_desc['height'] 414 415 # add dolby vision signaling if present 416 if 'dolby_vision' in sample_desc: 417 dv_info = sample_desc['dolby_vision'] 418 if sample_desc['coding'] in ['dvav', 'dva1', 'dvhe', 'dvh1']: 419 # non-backward-compatible 420 self.codec = sample_desc['coding'] + ('.%02d.%02d' % (dv_info['profile'], dv_info['level'])) 421 else: 422 # backward-compatible 423 coding_map = { 424 'avc1': 'dva1', 425 'avc3': 'dvav', 426 'hev1': 'dvhe', 427 'hvc1': 'dvh1' 428 } 429 dv_coding = coding_map.get(sample_desc['coding']) 430 if dv_coding: 431 dv_string = dv_coding + ('.%02d.%02d' % (dv_info['profile'], dv_info['level'])) 432 self.codec += ','+dv_string 433 434 if self.type == 'audio': 435 self.sample_rate = sample_desc['sample_rate'] 436 self.channels = sample_desc['channels'] 437 438 self.language = info['language'] 439 self.language_name = LanguageNames.get(LanguageCodeMap.get(self.language, 'und'), '') 440 441 def update(self, options): 442 # compute the total number of samples 443 self.total_sample_count = reduce(operator.add, self.sample_counts, 0) 444 445 # compute the total duration 446 self.total_duration = reduce(operator.add, self.segment_durations, 0) 447 self.total_scaled_duration = reduce(operator.add, self.segment_scaled_durations, 0) 448 449 # compute the average segment durations 450 segment_count = len(self.segment_durations) 451 if segment_count > 2: 452 # do not count the last two segments, which could be shorter 453 self.average_segment_duration = reduce(operator.add, self.segment_durations[:-2], 0)/float(segment_count-2) 454 elif segment_count > 0: 455 self.average_segment_duration = self.segment_durations[0] 456 else: 457 self.average_segment_duration = 0 458 459 # compute the average segment bitrates 460 self.media_size = reduce(operator.add, self.segment_sizes, 0) 461 if self.total_duration: 462 self.average_segment_bitrate = int(8.0*float(self.media_size)/self.total_duration) 463 464 # compute the max segment bitrates 465 if len(self.segment_bitrates) > 1: 466 self.max_segment_bitrate = max(self.segment_bitrates[:-1]) 467 else: 468 self.max_segment_bitrate = self.average_segment_bitrate 469 470 # compute the bandwidth 471 if options.min_buffer_time == 0.0: 472 options.min_buffer_time = self.average_segment_duration 473 self.bandwidth = ComputeBandwidth(options.min_buffer_time, self.segment_sizes, self.segment_durations) 474 475 if self.type == 'video': 476 # compute the frame rate 477 if self.total_duration: 478 self.frame_rate = self.total_sample_count / self.total_duration 479 self.frame_rate_ratio = str(fractions.Fraction(str(self.frame_rate)).limit_denominator(100000)) 480 else: 481 self.frame_rate = 0.0 482 self.frame_rate_ratio = "0" 483 484 def compute_kid(self): 485 moov = FilterChildren(self.parent.tree, 'moov')[0] 486 traks = FilterChildren(moov, 'trak') 487 for trak in traks: 488 tkhd = FindChild(trak, ['tkhd']) 489 if tkhd['id'] == self.id: 490 tenc = FindChild(trak, ('mdia', 'minf', 'stbl', 'stsd', 'encv', 'sinf', 'schi', 'tenc')) 491 if tenc is None: 492 tenc = FindChild(trak, ('mdia', 'minf', 'stbl', 'stsd', 'enca', 'sinf', 'schi', 'tenc')) 493 if tenc and 'default_KID' in tenc: 494 self.key_info['kid'] = tenc['default_KID'].strip('[]').replace(' ', '') 495 break 496 497 def __repr__(self): 498 return 'File '+str(self.parent.file_list_index)+'#'+str(self.id) 499 500class Mp4File: 501 def __init__(self, options, media_source): 502 self.media_source = media_source 503 self.info = media_source.mp4_info 504 self.tracks = {} 505 self.file_list_index = 0 # used to keep a sequence number just amongst all sources 506 507 filename = media_source.filename 508 if options.debug: 509 print('Processing MP4 file', filename) 510 511 # by default, the media name is the basename of the source file 512 self.media_name = path.basename(filename) 513 514 # walk the atom structure 515 self.atoms = WalkAtoms(filename) 516 self.segments = [] 517 for atom in self.atoms: 518 if atom.type == 'moov': 519 self.init_segment = atom 520 elif atom.type == 'moof': 521 self.segments.append([atom]) 522 else: 523 if self.segments: 524 self.segments[-1].append(atom) 525 #print self.segments 526 if options.debug: 527 print(' found', len(self.segments), 'segments') 528 529 for track in self.info['tracks']: 530 self.tracks[track['id']] = Mp4Track(self, track) 531 532 # get a complete file dump 533 json_dump = Mp4Dump(options, filename, format='json', verbosity='1') 534 self.tree = json.loads(json_dump, strict=False, object_pairs_hook=collections.OrderedDict) 535 536 # look for KIDs 537 for track in self.tracks.values(): 538 track.compute_kid() 539 540 # compute default sample durations and timescales 541 for atom in self.tree: 542 if atom['name'] == 'moov': 543 for c1 in atom['children']: 544 if c1['name'] == 'mvex': 545 for c2 in c1['children']: 546 if c2['name'] == 'trex': 547 self.tracks[c2['track id']].default_sample_duration = c2['default sample duration'] 548 elif c1['name'] == 'trak': 549 track_id = 0 550 for c2 in c1['children']: 551 if c2['name'] == 'tkhd': 552 track_id = c2['id'] 553 for c2 in c1['children']: 554 if c2['name'] == 'mdia': 555 for c3 in c2['children']: 556 if c3['name'] == 'mdhd': 557 self.tracks[track_id].timescale = c3['timescale'] 558 559 # partition the segments 560 segment_index = 0 561 track = None 562 segment_size = 0 563 segment_duration_sec = 0.0 564 for atom in self.tree: 565 segment_size += atom['size'] 566 if atom['name'] == 'moof': 567 segment_size = atom['size'] 568 trafs = FilterChildren(atom, 'traf') 569 if len(trafs) != 1: 570 PrintErrorAndExit('ERROR: unsupported input file, more than one "traf" box in fragment') 571 tfhd = FilterChildren(trafs[0], 'tfhd')[0] 572 track = self.tracks[tfhd['track ID']] 573 track.moofs.append(segment_index) 574 segment_duration = 0 575 default_sample_duration = tfhd.get('default sample duration', track.default_sample_duration) 576 for trun in FilterChildren(trafs[0], 'trun'): 577 track.sample_counts.append(trun['sample count']) 578 for entry in trun['entries']: 579 sample_duration = int(entry.get('d', default_sample_duration)) 580 segment_duration += sample_duration 581 track.segment_scaled_durations.append(segment_duration) 582 segment_duration_sec = float(segment_duration) / float(track.timescale) 583 track.segment_durations.append(segment_duration_sec) 584 segment_index += 1 585 586 # remove the 'trun' entries to save some memory 587 for traf in trafs: 588 traf['children'] = [x for x in traf['children'] if x['name'] != 'trun'] 589 elif atom['name'] == 'mdat': 590 # end of fragment on 'mdat' atom 591 if track: 592 track.segment_sizes.append(segment_size) 593 if segment_duration_sec > 0.0: 594 segment_bitrate = int((8.0 * float(segment_size)) / segment_duration_sec) 595 else: 596 segment_bitrate = 0 597 track.segment_bitrates.append(segment_bitrate) 598 segment_size = 0 599 600 # parse the 'mfra' index if there is one and update segment durations. 601 # this is needed to deal with input files that have an 'mfra' index that 602 # does not exactly match the sample durations (because of rounding errors), 603 # which will make the Smooth Streaming URL mapping fail since the IIS Smooth Streaming 604 # server uses the 'mfra' index to locate the segments in the source .ismv file 605 mfra = FindChild(self.tree, ['mfra']) 606 if mfra: 607 for tfra in FilterChildren(mfra, 'tfra'): 608 track_id = tfra['track_ID'] 609 if track_id not in self.tracks: 610 continue 611 track = self.tracks[track_id] 612 moof_pointers = [] 613 for (name, value) in list(tfra.items()): 614 if name.startswith('['): 615 attributes = value.split(',') 616 attribute_dict = {} 617 for attribute in attributes: 618 (attribute_name, attribute_value) = attribute.strip().split('=') 619 attribute_dict[attribute_name] = int(attribute_value) 620 if attribute_dict['traf_number'] == 1 and attribute_dict['trun_number'] == 1 and attribute_dict['sample_number'] == 1: 621 # this points to the first sample of the first trun of the first traf, use it as a start time indication 622 moof_pointers.append(attribute_dict) 623 if len(moof_pointers) > 1: 624 for i in range(len(moof_pointers)-1): 625 if i+1 >= len(track.moofs): 626 break 627 628 moof1 = self.segments[track.moofs[i]][0] 629 moof2 = self.segments[track.moofs[i+1]][0] 630 if moof1.position == moof_pointers[i]['moof_offset'] and moof2.position == moof_pointers[i+1]['moof_offset']: 631 # pointers match two consecutive moofs 632 moof_duration = moof_pointers[i+1]['time'] - moof_pointers[i]['time'] 633 moof_duration_sec = float(moof_duration) / float(track.timescale) 634 track.segment_durations[i] = moof_duration_sec 635 track.segment_scaled_durations[i] = moof_duration 636 637 # compute the total numer of samples for each track 638 for track_id in self.tracks: 639 self.tracks[track_id].update(options) 640 641 # print debug info if requested 642 if options.debug: 643 for track in self.tracks.values(): 644 print('Track ID =', track.id) 645 print(' Segment Count =', len(track.segment_durations)) 646 print(' Type =', track.type) 647 print(' Sample Count =', track.total_sample_count) 648 print(' Average segment bitrate =', track.average_segment_bitrate) 649 print(' Max segment bitrate =', track.max_segment_bitrate) 650 print(' Required bandwidth =', int(track.bandwidth)) 651 print(' Average segment duration =', track.average_segment_duration) 652 653 def find_track_by_id(self, track_id_to_find): 654 for track_id in self.tracks: 655 if track_id_to_find == 0 or track_id_to_find == track_id: 656 return self.tracks[track_id] 657 658 return None 659 660 def find_tracks_by_type(self, track_type_to_find): 661 return [track for track in list(self.tracks.values()) if track_type_to_find == '' or track_type_to_find == track.type] 662 663class MediaSource: 664 def __init__(self, options, name): 665 self.name = name 666 self.mp4_info = None 667 self.key_infos = {} # key infos indexed by track ID 668 if name.startswith('[') and ']' in name: 669 try: 670 params = name[1:name.find(']')] 671 self.filename = name[2+len(params):] 672 self.spec = dict([x.split('=') for x in params.split(',')]) 673 for int_param in ['track']: 674 if int_param in self.spec: self.spec[int_param] = int(self.spec[int_param]) 675 except: 676 raise Exception('Invalid syntax for media file spec "'+name+'"') 677 else: 678 self.filename = name 679 self.spec = {} 680 681 if 'type' not in self.spec: self.spec['type'] = '' 682 if 'track' not in self.spec: self.spec['track'] = 0 683 if 'language' not in self.spec: self.spec['language'] = '' 684 685 # check if we have an explicit format (default=mp4) 686 if '+format' in self.spec: 687 self.format = self.spec['+format'] 688 else: 689 self.format = 'mp4' 690 691 # if the file is an mp4 file, get the mp4 info now 692 if self.format == 'mp4': 693 json_info = Mp4Info(options, self.filename, format='json', fast=True) 694 self.mp4_info = json.loads(json_info, strict=False) 695 696 # keep a record of our original filename in case it gets changed later 697 self.original_filename = self.filename 698 699 def __repr__(self): 700 return self.name 701 702def ComputeBandwidth(buffer_time, sizes, durations): 703 bandwidth = 0.0 704 for i in range(len(sizes)): 705 accu_size = 0 706 accu_duration = 0 707 buffer_size = (buffer_time*bandwidth)/8.0 708 for j in range(i, len(sizes)): 709 accu_size += sizes[j] 710 accu_duration += durations[j] 711 max_avail = buffer_size+accu_duration*bandwidth/8.0 712 if accu_size > max_avail and accu_duration != 0: 713 bandwidth = 8.0*(accu_size-buffer_size)/accu_duration 714 break 715 return int(bandwidth) 716 717def MakeNewDir(dir, exit_if_exists=False, severity=None, recursive=False): 718 if path.exists(dir): 719 if severity: 720 sys.stderr.write(severity+': ') 721 sys.stderr.write('directory "'+dir+'" already exists\n') 722 if exit_if_exists: 723 sys.exit(1) 724 elif recursive: 725 os.makedirs(dir) 726 else: 727 os.mkdir(dir) 728 729def MakePsshBox(system_id, payload): 730 pssh_size = 12+16+4+len(payload) 731 return struct.pack('>I', pssh_size)+b'pssh'+struct.pack('>I',0)+system_id+struct.pack('>I', len(payload))+payload 732 733def MakePsshBoxV1(system_id, kids, payload): 734 pssh_size = 12+16+4+(16*len(kids))+4+len(payload) 735 pssh = struct.pack('>I', pssh_size)+b'pssh'+struct.pack('>I',0x01000000)+system_id+struct.pack('>I', len(kids)) 736 for kid in kids: 737 pssh += bytes.fromhex(kid) 738 pssh += struct.pack('>I', len(payload))+payload 739 return pssh 740 741def GetEncryptionKey(options, spec): 742 if options.debug: 743 print('Resolving KID and Key from spec:', spec) 744 if spec.startswith('skm:'): 745 import skm 746 return skm.ResolveKey(options, spec[4:]) 747 else: 748 raise Exception('Key Locator scheme not supported') 749 750# Compute the Dolby Digital AudioChannelConfiguration value 751# 752# (MSB = 0) 753# 0 L 754# 1 C 755# 2 R 756# 3 Ls 757# 4 Rs 758# 5 Lc/Rc pair 759# 6 Lrs/Rrs pair 760# 7 Cs 761# 8 Ts 762# 9 Lsd/Rsd pair 763# 10 Lw/Rw pair 764# 11 Vhl/Vhr pair 765# 12 Vhc 766# 13 Lts/Rts pair 767# 14 LFE2 768# 15 LFE 769# 770# Using acmod 771# 000 Ch1, Ch2 772# 001 C 773# 010 L, R 774# 011 L, C, R 775# 100 L, R, S 776# 101 L, C, R, S 777# 110 L, R, SL, SR 778# 111 L, C, R, SL, SR 779# 780# chan_loc 781# 0 Lc/Rc pair 782# 1 Lrs/Rrs pair 783# 2 Cs 784# 3 Ts 785# 4 Lsd/Rsd pair 786# 5 Lw/Rw pair 787# 6 Lvh/Rvh pair 788# 7 Cvh 789# 8 LFE2 790# 791# The Digital Cinema specification, which is also referenced from the 792# Blu-ray Disc Specification, Specifies this speaker layout: 793# 794# +---+ +---+ +---+ 795# |Vhl| |Vhc| |Vhr| "High" speakers 796# +---+ +---+ +---+ 797# +---+ +---+ +---+ +---+ +---+ +---+ +---+ 798# |Lw | | L | |Lc | | C | |Rc | | R | |Rw | 799# +---+ +---+ +---+ +---+ +---+ +---+ +---+ 800# +----+ +----+ 801# |LFE1] |LFE2| 802# +---+ +----++---++----+ +---+ 803# |Ls | |Ts | |Rs | 804# +---+ +---+ +---+ 805# 806# +---+ +---+ 807# |Lsd| |Rsd| 808# +---+ +---+ +---+ +---+ +---+ 809# |Rls| |Cs | |Rrs| 810# +---+ +---+ +---+ 811# 812# Other names: 813# Constant | HDMI | Digital Cinema | DTS extension 814# ==============================|================|============== 815# FRONT_LEFT | FL | L | L 816# FRONT_RIGHT | FR | R | R 817# FRONT_CENTER | FC | C | C 818# LOW_FREQUENCY | LFE | LFE | LFE 819# BACK_LEFT | (RLC) | Rls | Lsr 820# BACK_RIGHT | (RRC) | Rrs | Rsr 821# FRONT_LEFT_OF_CENTER | FLC | Lc | Lc 822# FRONT_RIGHT_OF_CENTER | FRC | Rc | Rc 823# BACK_CENTER | RC | Cs | Cs 824# SIDE_LEFT | (RL) | Ls | Lss 825# SIDE_RIGHT | (RR) | Rs | Rss 826# TOP_CENTER | TC | Ts | Oh 827# TOP_FRONT_LEFT | FLH | Vhl | Lh 828# TOP_FRONT_CENTER | FCH | Vhc | Ch 829# TOP_FRONT_RIGHT | FRH | Vhr | Rh 830# TOP_BACK_LEFT | | | Chr 831# TOP_BACK_CENTER | | | Lhr 832# TOP_BACK_RIGHT | | | Rhr 833# STEREO_LEFT | | | 834# STEREO_RIGHT | | | 835# WIDE_LEFT | FLW | Lw | Lw 836# WIDE_RIGHT | FRW | Rw | Rw 837# SURROUND_DIRECT_LEFT | | Lsd | Ls 838# SURROUND_DIRECT_RIGHT | | Rsd | Rs 839 840DolbyDigital_chan_loc = { 841 0: 'Lc/Rc', 842 1: 'Lrs/Rrs', 843 2: 'Cs', 844 3: 'Ts', 845 4: 'Lsd/Rsd', 846 5: 'Lw/Rw', 847 6: 'Vhl/Vhr', 848 7: 'Vhc', 849 8: 'LFE2' 850} 851 852DolbyDigital_acmod = { 853 0: ['L', 'R'], # in theory this is not supported but we'll pick a reasonable value 854 1: ['C'], 855 2: ['L', 'R'], 856 3: ['L', 'C', 'R'], 857 4: ['L', 'R', 'Cs'], 858 5: ['L', 'C', 'R', 'Cs'], 859 6: ['L', 'R', 'Ls', 'Rs'], 860 7: ['L', 'C', 'R', 'Ls', 'Rs'] 861} 862 863def GetDolbyDigitalPlusChannels(track): 864 sample_desc = track.info['sample_descriptions'][0] 865 if 'dolby_digital_info' not in sample_desc: 866 return (track.channels, []) 867 dd_info = sample_desc['dolby_digital_info']['substreams'][0] 868 channels = DolbyDigital_acmod[dd_info['acmod']][:] 869 if dd_info['lfeon'] == 1: 870 channels.append('LFE') 871 if dd_info['num_dep_sub'] and 'chan_loc' in dd_info: 872 chan_loc_value = dd_info['chan_loc'] 873 for i in range(9): 874 if chan_loc_value & (1<<i): 875 channels.append(DolbyDigital_chan_loc[i]) 876 channel_count = 0 877 for channel in channels: 878 if '/' in channel: 879 channel_count += 2 880 else: 881 channel_count += 1 882 return (channel_count, channels) 883 884def ComputeDolbyDigitalPlusAudioChannelConfig(track): 885 flags = { 886 'L': 1<<15, 887 'C': 1<<14, 888 'R': 1<<13, 889 'Ls': 1<<12, 890 'Rs': 1<<11, 891 'Lc/Rc': 1<<10, 892 'Lrs/Rrs': 1<<9, 893 'Cs': 1<<8, 894 'Ts': 1<<7, 895 'Lsd/Rsd': 1<<6, 896 'Lw/Rw': 1<<5, 897 'Vhl/Vhr': 1<<4, 898 'Vhc': 1<<3, 899 'Lts/Rts': 1<<2, 900 'LFE2': 1<<1, 901 'LFE': 1<<0 902 } 903 (channel_count, channels) = GetDolbyDigitalPlusChannels(track) 904 if not channels: 905 return str(channel_count) 906 config = 0 907 for channel in channels: 908 if channel in flags: 909 config |= flags[channel] 910 return hex(config).upper()[2:] 911 912def ComputeDolbyAc4AudioChannelConfig(track): 913 sample_desc = track.info['sample_descriptions'][0] 914 if 'dolby_ac4_info' in sample_desc: 915 dolby_ac4_info = sample_desc['dolby_ac4_info'] 916 if 'presentations' in dolby_ac4_info and dolby_ac4_info['presentations']: 917 presentation = dolby_ac4_info['presentations'][0] 918 if 'presentation_channel_mask_v1' in presentation: 919 return '%06x' % presentation['presentation_channel_mask_v1'] 920 921 return '000000' 922 923def ComputeDolbyDigitalPlusAudioChannelMask(track): 924 masks = { 925 'L': 0x1, # SPEAKER_FRONT_LEFT 926 'R': 0x2, # SPEAKER_FRONT_RIGHT 927 'C': 0x4, # SPEAKER_FRONT_CENTER 928 'LFE': 0x8, # SPEAKER_LOW_FREQUENCY 929 'Ls': 0x10, # SPEAKER_BACK_LEFT 930 'Rs': 0x20, # SPEAKER_BACK_RIGHT 931 'Lc': 0x40, # SPEAKER_FRONT_LEFT_OF_CENTER 932 'Rc': 0x80, # SPEAKER_FRONT_RIGHT_OF_CENTER 933 'Cs': 0x100, # SPEAKER_BACK_CENTER 934 'Lrs': 0x200, # SPEAKER_SIDE_LEFT 935 'Rrs': 0x400, # SPEAKER_SIDE_RIGHT 936 'Ts': 0x800, # SPEAKER_TOP_CENTER 937 'Vhl/Vhr': 0x1000 | 0x4000, # SPEAKER_TOP_FRONT_LEFT/SPEAKER_TOP_FRONT_RIGHT 938 'Vhc': 0x2000, # SPEAKER_TOP_FRONT_CENTER 939 } 940 (channel_count, channels) = GetDolbyDigitalPlusChannels(track) 941 if not channels: 942 return (channel_count, 3) 943 channel_mask = 0 944 for channel in channels: 945 if channel in masks: 946 channel_mask |= masks[channel] 947 else: 948 (channel1, channel2) = channel.split('/') 949 if channel1 in masks: 950 channel_mask |= masks[channel1] 951 if channel2 in masks: 952 channel_mask |= masks[channel2] 953 return (channel_count, channel_mask) 954 955def ComputeDolbyDigitalPlusSmoothStreamingInfo(track): 956 (channel_count, channel_mask) = ComputeDolbyDigitalPlusAudioChannelMask(track) 957 info = "0006" # 1536 in little-endian 958 mask_hex_be = "{0:0{1}x}".format(channel_mask, 4) 959 info += mask_hex_be[2:4]+mask_hex_be[0:2]+'0000' 960 info += "af87fba7022dfb42a4d405cd93843bdd" 961 info += track.info['sample_descriptions'][0]['dolby_digital_info']['dec3_payload'] 962 return (channel_count, info.lower()) 963 964def ComputeMarlinPssh(options): 965 # create a dummy (empty) Marlin PSSH 966 return struct.pack('>I4sI4sII', 24, b'marl', 16, b'mkid', 0, 0) 967 968def DerivePlayReadyKey(seed, kid, swap=True): 969 if len(seed) < 30: 970 raise Exception('seed must be >= 30 bytes') 971 if len(kid) != 16: 972 raise Exception('kid must be 16 bytes') 973 974 if swap: 975 kid = kid[3:4]+kid[2:3]+kid[1:2]+kid[0:1]+kid[5:6]+kid[4:5]+kid[7:8]+kid[6:7]+kid[8:] 976 977 seed = seed[:30] 978 979 sha = hashlib.sha256() 980 sha.update(seed) 981 sha.update(kid) 982 sha_A = sha.digest() 983 984 sha = hashlib.sha256() 985 sha.update(seed) 986 sha.update(kid) 987 sha.update(seed) 988 sha_B = sha.digest() 989 990 sha = hashlib.sha256() 991 sha.update(seed) 992 sha.update(kid) 993 sha.update(seed) 994 sha.update(kid) 995 sha_C = sha.digest() 996 997 content_key = bytes([sha_A[i] ^ sha_A[i+16] ^ sha_B[i] ^ sha_B[i+16] ^ sha_C[i] ^ sha_C[i+16] for i in range(16)]) 998 999 return content_key 1000 1001def ComputePlayReadyChecksum(kid, key): 1002 import aes 1003 return aes.rijndael(key).encrypt(kid)[:8] 1004 1005def WrapPlayReadyHeaderXml(header_xml): 1006 # encode the XML header into UTF-16 little-endian 1007 header_utf16_le = header_xml.encode('utf-16-le') 1008 rm_record = struct.pack('<HH', 1, len(header_utf16_le))+header_utf16_le 1009 return struct.pack('<IH', len(rm_record)+6, 1)+rm_record 1010 1011def ComputePlayReadyKeyInfo(key_spec): 1012 (kid_hex, key_hex) = key_spec 1013 kid = bytes.fromhex(kid_hex) 1014 kid = kid[3:4]+kid[2:3]+kid[1:2]+kid[0:1]+kid[5:6]+kid[4:5]+kid[7:8]+kid[6:7]+kid[8:] 1015 1016 xml_key_checksum = None 1017 if key_hex: 1018 xml_key_checksum = Base64Encode(ComputePlayReadyChecksum(kid, bytes.fromhex(key_hex))) 1019 1020 return (Base64Encode(kid), xml_key_checksum) 1021 1022def ComputePlayReadyXmlKid(key_spec, algid): 1023 (xml_kid, xml_key_checksum) = ComputePlayReadyKeyInfo(key_spec) 1024 1025 # optional checkum 1026 xml_key_checksum_attribute = '' 1027 if xml_key_checksum: 1028 xml_key_checksum_attribute = 'CHECKSUM="' + xml_key_checksum + '" ' 1029 1030 # KID 1031 return '<KID ALGID="%s" %sVALUE="%s"></KID>' % (algid, xml_key_checksum_attribute, xml_kid) 1032 1033def ComputePlayReadyHeader(version, header_spec, encryption_scheme, key_specs): 1034 # map the encryption scheme and an algorithm ID 1035 scheme_to_id_map = { 1036 'cenc': 'AESCTR', 1037 'cens': 'AESCTR', 1038 'cbc1': 'AESCBC', 1039 'cbcs': 'AESCBC' 1040 } 1041 if encryption_scheme not in scheme_to_id_map: 1042 raise Exception('Encryption scheme not supported by PlayReady') 1043 algid = scheme_to_id_map[encryption_scheme] 1044 1045 # check that the algorithm is supported 1046 if algid == 'AESCBC' and version < "4.3": 1047 raise Exception('AESCBC requires PlayReady 4.3 or higher') 1048 1049 # construct the base64 header 1050 if header_spec is None: 1051 header_spec = '' 1052 if header_spec.startswith('#'): 1053 header_b64 = header_spec[1:] 1054 header = Base64Decode(header_b64) 1055 if not header: 1056 raise Exception('invalid base64 encoding') 1057 return header 1058 elif header_spec.startswith('@') or path.exists(header_spec): 1059 # check that the file exists 1060 if header_spec.startswith('@'): 1061 header_spec = header_spec[1:] 1062 if not path.exists(header_spec): 1063 raise Exception('header data file does not exist') 1064 1065 # read the header from the file 1066 header = open(header_spec, 'rb').read() 1067 header_xml = None 1068 if (header[0] == 0xff and header[1] == 0xfe) or (header[0] == 0xfe and header[1] == 0xff): 1069 # this is UTF-16 XML 1070 header_xml = header.decode('utf-16') 1071 elif header[0] == '<' and header[1] != 0x00: 1072 # this is ASCII or UTF-8 XML 1073 header_xml = header.decode('utf-8') 1074 elif header[0] == '<' and header[1] == 0x00: 1075 # this UTF-16LE XML without charset header 1076 header_xml = header.decode('utf-16-le') 1077 if header_xml is not None: 1078 header = WrapPlayReadyHeaderXml(header_xml) 1079 return header 1080 else: 1081 try: 1082 pairs = header_spec.split('#') 1083 fields = {} 1084 for pair in pairs: 1085 if not pair: continue 1086 name, value = pair.split(':', 1) 1087 fields[name] = value 1088 except: 1089 raise Exception('invalid syntax for argument') 1090 1091 xml_protectinfo = '' 1092 xml_extras = '' 1093 if version == "4.0": 1094 # 4.0 version only 1095 if len(key_specs) != 1: 1096 raise Exception("PlayReady 4.0 only supports 1 key") 1097 1098 (xml_kid, xml_key_checksum) = ComputePlayReadyKeyInfo(key_specs[0]) 1099 xml_protectinfo = '<KEYLEN>16</KEYLEN><ALGID>AESCTR</ALGID>' 1100 xml_extras = '<KID>' + xml_kid +'</KID>' 1101 if xml_key_checksum: 1102 xml_extras += '<CHECKSUM>' + xml_key_checksum + '</CHECKSUM>' 1103 else: 1104 # 4.1 and above 1105 if version == "4.1": 1106 # 4.1 only 1107 if len(key_specs) != 1: 1108 raise Exception("PlayReady 4.1 only supports 1 key") 1109 1110 # single KID 1111 xml_protectinfo += ComputePlayReadyXmlKid(key_specs[0], algid) 1112 else: 1113 # 4.2 and above 1114 1115 # list of KIDS 1116 xml_protectinfo += '<KIDS>' 1117 for key_spec in key_specs: 1118 xml_protectinfo += ComputePlayReadyXmlKid(key_spec, algid) 1119 xml_protectinfo += '</KIDS>' 1120 1121 header_xml = '<WRMHEADER xmlns="http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader" version="%s.0.0"><DATA>' % version 1122 header_xml += '<PROTECTINFO>' + xml_protectinfo + '</PROTECTINFO>' 1123 header_xml += xml_extras 1124 if 'CUSTOMATTRIBUTES' in fields: 1125 header_xml += '<CUSTOMATTRIBUTES>'+Base64Decode(fields['CUSTOMATTRIBUTES']).decode('utf-8').replace('\n', '')+'</CUSTOMATTRIBUTES>' 1126 if 'LA_URL' in fields: 1127 header_xml += '<LA_URL>'+saxutils.escape(fields['LA_URL'])+'</LA_URL>' 1128 if 'LUI_URL' in fields: 1129 header_xml += '<LUI_URL>'+saxutils.escape(fields['LUI_URL'])+'</LUI_URL>' 1130 if 'DS_ID' in fields: 1131 header_xml += '<DS_ID>'+saxutils.escape(fields['DS_ID'])+'</DS_ID>' 1132 1133 header_xml += '</DATA></WRMHEADER>' 1134 return WrapPlayReadyHeaderXml(header_xml) 1135 1136def ComputePrimetimeMetaData(metadata_spec, kid_hex): 1137 # construct the base64 header 1138 if metadata_spec is None: 1139 metadata_spec = '' 1140 if metadata_spec.startswith('#'): 1141 metadata_b64 = metadata_spec[1:] 1142 metadata = Base64Decode(metadata_b64) 1143 if not metadata: 1144 raise Exception('invalid base64 encoding') 1145 elif metadata_spec.startswith('@'): 1146 metadata_filename = metadata_spec[1:] 1147 if not path.exists(metadata_filename): 1148 raise Exception('data file does not exist') 1149 1150 # read the header from the file 1151 metadata = open(metadata_filename, 'rb').read() 1152 1153 amet_size = 12+4+16 1154 amet_flags = 0 1155 if metadata: 1156 amet_flags |= 2 1157 amet_size += 4+len(metadata) 1158 amet_box = struct.pack('>I4sII', amet_size, 'amet', amet_flags, 1) + bytes.fromhex(kid_hex) 1159 if metadata: 1160 amet_box += struct.pack('>I', len(metadata))+metadata 1161 1162 return amet_box 1163 1164def WidevineVarInt(value): 1165 parts = [value % 128] 1166 value >>= 7 1167 while value: 1168 parts.append(value % 128) 1169 value >>= 7 1170 for i in range(len(parts) - 1): 1171 parts[i] |= (1<<7) 1172 return bytes(parts) 1173 1174def WidevineMakeHeader(fields): 1175 buffer = b'' 1176 for (field_num, field_val) in fields: 1177 if type(field_val) == int: 1178 wire_type = 0 # varint 1179 wire_val = WidevineVarInt(field_val) 1180 elif type(field_val) == str: 1181 wire_type = 2 1182 wire_val = WidevineVarInt(len(field_val)) + field_val.encode('ascii') 1183 elif type(field_val) == bytes: 1184 wire_type = 2 1185 wire_val = WidevineVarInt(len(field_val)) + field_val 1186 buffer += bytes([(field_num << 3) | wire_type]) + wire_val 1187 return buffer 1188 1189def ComputeWidevineHeader(header_spec, encryption_scheme, kid_hex): 1190 try: 1191 pairs = header_spec.split('#') 1192 fields = {} 1193 for pair in pairs: 1194 name, value = pair.split(':', 1) 1195 fields[name] = value 1196 except: 1197 raise Exception('invalid syntax for argument') 1198 1199 protobuf_fields = [(2, bytes.fromhex(kid_hex))] 1200 if 'provider' in fields: 1201 protobuf_fields.append((3, fields['provider'])) 1202 if 'content_id' in fields: 1203 protobuf_fields.append((4, bytes.fromhex(fields['content_id']))) 1204 if 'policy' in fields: 1205 protobuf_fields.append((6, fields['policy'])) 1206 1207 if encryption_scheme == 'cenc': 1208 protobuf_fields.append((1, 1)) 1209 1210 four_cc = struct.unpack('>I', encryption_scheme.encode('ascii'))[0] 1211 protobuf_fields.append((9, four_cc)) 1212 1213 return WidevineMakeHeader(protobuf_fields) 1214 1215############################################# 1216# Module Exports 1217############################################# 1218__all__ = [ 1219 'LanguageCodeMap', 1220 'LanguageNames', 1221 'PrintErrorAndExit', 1222 'XmlDuration', 1223 'Base64Encode', 1224 'Base64Decode', 1225 'Bento4Command', 1226 'Mp4Info', 1227 'Mp4Dump', 1228 'Mp4Split', 1229 'Mp4Fragment', 1230 'Mp4Encrypt', 1231 'Mp42Hls', 1232 'Mp4IframeIndex', 1233 'WalkAtoms', 1234 'Mp4Track', 1235 'Mp4File', 1236 'MediaSource', 1237 'ComputeBandwidth', 1238 'MakeNewDir', 1239 'MakePsshBox', 1240 'MakePsshBoxV1', 1241 'GetEncryptionKey', 1242 'GetDolbyDigitalPlusChannels', 1243 'ComputeDolbyDigitalPlusAudioChannelConfig', 1244 'ComputeDolbyAc4AudioChannelConfig', 1245 'ComputeDolbyDigitalPlusSmoothStreamingInfo', 1246 'ComputeMarlinPssh', 1247 'DerivePlayReadyKey', 1248 'ComputePlayReadyHeader', 1249 'ComputePrimetimeMetaData', 1250 'ComputeWidevineHeader' 1251] 1252