/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AppleUtils.h"
#include "MP4Decoder.h"
#include "mp4_demuxer/Adts.h"
#include "MediaInfo.h"
#include "AppleATDecoder.h"
#include "mozilla/Logging.h"
#include "mozilla/SyncRunnable.h"
#include "mozilla/UniquePtr.h"

#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
#define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})

namespace mozilla {

AppleATDecoder(const AudioInfo & aConfig,TaskQueue * aTaskQueue,MediaDataDecoderCallback * aCallback)21 AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
22 TaskQueue* aTaskQueue,
23 MediaDataDecoderCallback* aCallback)
24 : mConfig(aConfig)
25 , mFileStreamError(false)
26 , mTaskQueue(aTaskQueue)
27 , mCallback(aCallback)
28 , mConverter(nullptr)
29 , mStream(nullptr)
30 , mIsFlushing(false)
31 , mParsedFramesForAACMagicCookie(0)
32 , mErrored(false)
33 {
34 MOZ_COUNT_CTOR(AppleATDecoder);
35 LOG("Creating Apple AudioToolbox decoder");
36 LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
37 mConfig.mMimeType.get(),
38 mConfig.mRate,
39 mConfig.mChannels,
40 mConfig.mBitDepth);
41
42 if (mConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
43 mFormatID = kAudioFormatMPEGLayer3;
44 } else if (mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
45 mFormatID = kAudioFormatMPEG4AAC;
46 } else {
47 mFormatID = 0;
48 }
49 }
50
AppleATDecoder::~AppleATDecoder()
{
  MOZ_COUNT_DTOR(AppleATDecoder);
  // Shutdown()/ProcessShutdown() must have run first; otherwise the
  // AudioConverter (and possibly the AudioFileStream) would leak.
  MOZ_ASSERT(!mConverter);
}

57 RefPtr<MediaDataDecoder::InitPromise>
Init()58 AppleATDecoder::Init()
59 {
60 if (!mFormatID) {
61 NS_ERROR("Non recognised format");
62 return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
63 }
64
65 return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
66 }
67
68 void
Input(MediaRawData * aSample)69 AppleATDecoder::Input(MediaRawData* aSample)
70 {
71 MOZ_ASSERT(mCallback->OnReaderTaskQueue());
72 LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio",
73 aSample,
74 aSample->mDuration,
75 aSample->mTime,
76 aSample->mKeyframe ? " keyframe" : "",
77 (unsigned long long)aSample->Size());
78
79 // Queue a task to perform the actual decoding on a separate thread.
80 nsCOMPtr<nsIRunnable> runnable =
81 NewRunnableMethod<RefPtr<MediaRawData>>(
82 this,
83 &AppleATDecoder::SubmitSample,
84 RefPtr<MediaRawData>(aSample));
85 mTaskQueue->Dispatch(runnable.forget());
86 }
87
88 void
ProcessFlush()89 AppleATDecoder::ProcessFlush()
90 {
91 MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
92 mQueuedSamples.Clear();
93 if (mConverter) {
94 OSStatus rv = AudioConverterReset(mConverter);
95 if (rv) {
96 LOG("Error %d resetting AudioConverter", rv);
97 }
98 }
99 if (mErrored) {
100 mParsedFramesForAACMagicCookie = 0;
101 mMagicCookie.Clear();
102 ProcessShutdown();
103 mErrored = false;
104 }
105 }
106
107 void
Flush()108 AppleATDecoder::Flush()
109 {
110 MOZ_ASSERT(mCallback->OnReaderTaskQueue());
111 LOG("Flushing AudioToolbox AAC decoder");
112 mIsFlushing = true;
113 nsCOMPtr<nsIRunnable> runnable =
114 NewRunnableMethod(this, &AppleATDecoder::ProcessFlush);
115 SyncRunnable::DispatchToThread(mTaskQueue, runnable);
116 mIsFlushing = false;
117 }
118
void
AppleATDecoder::Drain()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  LOG("Draining AudioToolbox AAC decoder");
  // Block until every pending SubmitSample task has run, so all queued
  // samples have been decoded and output before completion is signalled.
  mTaskQueue->AwaitIdle();
  mCallback->DrainComplete();
  // Reset converter state so the decoder can be reused after the drain.
  Flush();
}

129 void
Shutdown()130 AppleATDecoder::Shutdown()
131 {
132 MOZ_ASSERT(mCallback->OnReaderTaskQueue());
133 nsCOMPtr<nsIRunnable> runnable =
134 NewRunnableMethod(this, &AppleATDecoder::ProcessShutdown);
135 SyncRunnable::DispatchToThread(mTaskQueue, runnable);
136 }
137
138 void
ProcessShutdown()139 AppleATDecoder::ProcessShutdown()
140 {
141 MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
142
143 if (mStream) {
144 OSStatus rv = AudioFileStreamClose(mStream);
145 if (rv) {
146 LOG("error %d disposing of AudioFileStream", rv);
147 return;
148 }
149 mStream = nullptr;
150 }
151
152 if (mConverter) {
153 LOG("Shutdown: Apple AudioToolbox AAC decoder");
154 OSStatus rv = AudioConverterDispose(mConverter);
155 if (rv) {
156 LOG("error %d disposing of AudioConverter", rv);
157 }
158 mConverter = nullptr;
159 }
160 }
161
// State handed to _PassthroughInputDataCallback through the AudioConverter
// "user data" pointer; describes the single compressed packet being decoded.
struct PassthroughUserData {
  UInt32 mChannels;  // channel count of the input stream
  UInt32 mDataSize;  // bytes left to feed; zeroed once handed over
  const void* mData; // compressed sample bytes (not owned)
  AudioStreamPacketDescription mPacket; // scratch description returned to AT
};

// Error value we pass through the decoder to signal that nothing
// has gone wrong during decoding and we're done processing the packet.
// (Deliberately a multi-char constant so it reads as a FourCC in logs.)
const uint32_t kNoMoreDataErr = 'MOAR';

173 static OSStatus
_PassthroughInputDataCallback(AudioConverterRef aAudioConverter,UInt32 * aNumDataPackets,AudioBufferList * aData,AudioStreamPacketDescription ** aPacketDesc,void * aUserData)174 _PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
175 UInt32* aNumDataPackets /* in/out */,
176 AudioBufferList* aData /* in/out */,
177 AudioStreamPacketDescription** aPacketDesc,
178 void* aUserData)
179 {
180 PassthroughUserData* userData = (PassthroughUserData*)aUserData;
181 if (!userData->mDataSize) {
182 *aNumDataPackets = 0;
183 return kNoMoreDataErr;
184 }
185
186 if (aPacketDesc) {
187 userData->mPacket.mStartOffset = 0;
188 userData->mPacket.mVariableFramesInPacket = 0;
189 userData->mPacket.mDataByteSize = userData->mDataSize;
190 *aPacketDesc = &userData->mPacket;
191 }
192
193 aData->mBuffers[0].mNumberChannels = userData->mChannels;
194 aData->mBuffers[0].mDataByteSize = userData->mDataSize;
195 aData->mBuffers[0].mData = const_cast<void*>(userData->mData);
196
197 // No more data to provide following this run.
198 userData->mDataSize = 0;
199
200 return noErr;
201 }
202
// Task-queue entry point for one input sample: lazily sets up the decoder,
// then decodes this sample plus any backlog queued while probing for the
// AAC magic cookie.
void
AppleATDecoder::SubmitSample(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // A flush is in progress; drop this now-stale sample.
  if (mIsFlushing) {
    return;
  }

  MediaResult rv = NS_OK;
  if (!mConverter) {
    rv = SetupDecoder(aSample);
    // NS_ERROR_NOT_INITIALIZED means "not ready yet" (still waiting for an
    // implicit magic cookie): the sample is queued below and retried later.
    if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
      mCallback->Error(rv);
      return;
    }
  }

  mQueuedSamples.AppendElement(aSample);

  if (rv == NS_OK) {
    // Decoder is ready: drain the backlog, which includes this sample.
    for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
      rv = DecodeSample(mQueuedSamples[i]);
      if (NS_FAILED(rv)) {
        mErrored = true;
        mCallback->Error(rv);
        return;
      }
    }
    mQueuedSamples.Clear();
  }
  mCallback->InputExhausted();
}

// Decode one compressed sample into PCM, optionally remap the channel
// order, and hand the resulting AudioData to the callback.
MediaResult
AppleATDecoder::DecodeSample(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // Array containing the queued decoded audio frames, about to be output.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // Pick a multiple of the frame size close to a power of two
  // for efficient allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Descriptions for _decompressed_ audio packets. ignored.
  auto packets = MakeUnique<AudioStreamPacketDescription[]>(MAX_AUDIO_FRAMES);

  // This API insists on having packets spoon-fed to it from a callback.
  // This structure exists only to pass our state.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->Size(), aSample->Data() };

  // Decompressed audio buffer
  AlignedAudioBuffer decoded(maxDecodedSamples);
  if (!decoded) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Pull decoded frames in chunks of at most MAX_AUDIO_FRAMES until the
  // input callback reports the packet has been fully consumed.
  do {
    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = channels;
    decBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                                                  _PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    // kNoMoreDataErr is our own sentinel from the input callback; it means
    // "input exhausted", not a real decode failure.
    if (rv && rv != kNoMoreDataErr) {
      LOG("Error decoding audio sample: %d\n", rv);
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
                                       rv, aSample->mTime));
    }

    if (numFrames) {
      // Accumulate the interleaved PCM produced by this iteration.
      outputData.AppendElements(decoded.get(), numFrames * channels);
    }

    if (rv == kNoMoreDataErr) {
      break;
    }
  } while (true);

  if (outputData.IsEmpty()) {
    // Nothing decoded this round (e.g. decoder priming); not an error.
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
  if (!duration.IsValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return MediaResult(
      NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
      RESULT_DETAIL(
        "Invalid count of accumulated audio samples: num:%llu rate:%d",
        uint64_t(numFrames), rate));
  }

#ifdef LOG_SAMPLE_DECODE
  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->mTime / USECS_PER_S,
      duration.ToSeconds());
#endif

  AudioSampleBuffer data(outputData.Elements(), outputData.Length());
  if (!data.Data()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  // Lazily create a channel-order converter the first time we have a
  // source layout to convert from (set by SetupChannelLayout).
  if (mChannelLayout && !mAudioConverter) {
    AudioConfig in(*mChannelLayout.get(), rate);
    AudioConfig out(channels, rate);
    if (!in.IsValid() || !out.IsValid()) {
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Invalid audio config"));
    }
    mAudioConverter = MakeUnique<AudioConverter>(in, out);
  }
  if (mAudioConverter) {
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    data = mAudioConverter->Process(Move(data));
  }

  RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                          aSample->mTime,
                                          duration.ToMicroseconds(),
                                          numFrames,
                                          data.Forget(),
                                          channels,
                                          rate);
  mCallback->Output(audio);
  return NS_OK;
}

350 MediaResult
GetInputAudioDescription(AudioStreamBasicDescription & aDesc,const nsTArray<uint8_t> & aExtraData)351 AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
352 const nsTArray<uint8_t>& aExtraData)
353 {
354 MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
355
356 // Request the properties from CoreAudio using the codec magic cookie
357 AudioFormatInfo formatInfo;
358 PodZero(&formatInfo.mASBD);
359 formatInfo.mASBD.mFormatID = mFormatID;
360 if (mFormatID == kAudioFormatMPEG4AAC) {
361 formatInfo.mASBD.mFormatFlags = mConfig.mExtendedProfile;
362 }
363 formatInfo.mMagicCookieSize = aExtraData.Length();
364 formatInfo.mMagicCookie = aExtraData.Elements();
365
366 UInt32 formatListSize;
367 // Attempt to retrieve the default format using
368 // kAudioFormatProperty_FormatInfo method.
369 // This method only retrieves the FramesPerPacket information required
370 // by the decoder, which depends on the codec type and profile.
371 aDesc.mFormatID = mFormatID;
372 aDesc.mChannelsPerFrame = mConfig.mChannels;
373 aDesc.mSampleRate = mConfig.mRate;
374 UInt32 inputFormatSize = sizeof(aDesc);
375 OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
376 0,
377 NULL,
378 &inputFormatSize,
379 &aDesc);
380 if (NS_WARN_IF(rv)) {
381 return MediaResult(
382 NS_ERROR_FAILURE,
383 RESULT_DETAIL("Unable to get format info:%lld", int64_t(rv)));
384 }
385
386 // If any of the methods below fail, we will return the default format as
387 // created using kAudioFormatProperty_FormatInfo above.
388 rv = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
389 sizeof(formatInfo),
390 &formatInfo,
391 &formatListSize);
392 if (rv || (formatListSize % sizeof(AudioFormatListItem))) {
393 return NS_OK;
394 }
395 size_t listCount = formatListSize / sizeof(AudioFormatListItem);
396 auto formatList = MakeUnique<AudioFormatListItem[]>(listCount);
397
398 rv = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
399 sizeof(formatInfo),
400 &formatInfo,
401 &formatListSize,
402 formatList.get());
403 if (rv) {
404 return NS_OK;
405 }
406 LOG("found %u available audio stream(s)",
407 formatListSize / sizeof(AudioFormatListItem));
408 // Get the index number of the first playable format.
409 // This index number will be for the highest quality layer the platform
410 // is capable of playing.
411 UInt32 itemIndex;
412 UInt32 indexSize = sizeof(itemIndex);
413 rv = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
414 formatListSize,
415 formatList.get(),
416 &indexSize,
417 &itemIndex);
418 if (rv) {
419 return NS_OK;
420 }
421
422 aDesc = formatList[itemIndex].mASBD;
423
424 return NS_OK;
425 }
426
427 AudioConfig::Channel
ConvertChannelLabel(AudioChannelLabel id)428 ConvertChannelLabel(AudioChannelLabel id)
429 {
430 switch (id) {
431 case kAudioChannelLabel_Mono:
432 return AudioConfig::CHANNEL_MONO;
433 case kAudioChannelLabel_Left:
434 return AudioConfig::CHANNEL_LEFT;
435 case kAudioChannelLabel_Right:
436 return AudioConfig::CHANNEL_RIGHT;
437 case kAudioChannelLabel_Center:
438 return AudioConfig::CHANNEL_CENTER;
439 case kAudioChannelLabel_LFEScreen:
440 return AudioConfig::CHANNEL_LFE;
441 case kAudioChannelLabel_LeftSurround:
442 return AudioConfig::CHANNEL_LS;
443 case kAudioChannelLabel_RightSurround:
444 return AudioConfig::CHANNEL_RS;
445 case kAudioChannelLabel_CenterSurround:
446 return AudioConfig::CHANNEL_RCENTER;
447 case kAudioChannelLabel_RearSurroundLeft:
448 return AudioConfig::CHANNEL_RLS;
449 case kAudioChannelLabel_RearSurroundRight:
450 return AudioConfig::CHANNEL_RRS;
451 default:
452 return AudioConfig::CHANNEL_INVALID;
453 }
454 }
455
456 // Will set mChannelLayout if a channel layout could properly be identified
457 // and is supported.
458 nsresult
SetupChannelLayout()459 AppleATDecoder::SetupChannelLayout()
460 {
461 MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
462
463 // Determine the channel layout.
464 UInt32 propertySize;
465 UInt32 size;
466 OSStatus status =
467 AudioConverterGetPropertyInfo(mConverter,
468 kAudioConverterOutputChannelLayout,
469 &propertySize, NULL);
470 if (status || !propertySize) {
471 LOG("Couldn't get channel layout property (%s)", FourCC2Str(status));
472 return NS_ERROR_FAILURE;
473 }
474
475 auto data = MakeUnique<uint8_t[]>(propertySize);
476 size = propertySize;
477 status =
478 AudioConverterGetProperty(mConverter, kAudioConverterInputChannelLayout,
479 &size, data.get());
480 if (status || size != propertySize) {
481 LOG("Couldn't get channel layout property (%s)",
482 FourCC2Str(status));
483 return NS_ERROR_FAILURE;
484 }
485
486 AudioChannelLayout* layout =
487 reinterpret_cast<AudioChannelLayout*>(data.get());
488 AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
489
490 // if tag is kAudioChannelLayoutTag_UseChannelDescriptions then the structure
491 // directly contains the the channel layout mapping.
492 // If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will
493 // be defined via the bitmap and can be retrieved using
494 // kAudioFormatProperty_ChannelLayoutForBitmap property.
495 // Otherwise the tag itself describes the layout.
496 if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
497 AudioFormatPropertyID property =
498 tag == kAudioChannelLayoutTag_UseChannelBitmap
499 ? kAudioFormatProperty_ChannelLayoutForBitmap
500 : kAudioFormatProperty_ChannelLayoutForTag;
501
502 if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
503 status =
504 AudioFormatGetPropertyInfo(property,
505 sizeof(UInt32), &layout->mChannelBitmap,
506 &propertySize);
507 } else {
508 status =
509 AudioFormatGetPropertyInfo(property,
510 sizeof(AudioChannelLayoutTag), &tag,
511 &propertySize);
512 }
513 if (status || !propertySize) {
514 LOG("Couldn't get channel layout property info (%s:%s)",
515 FourCC2Str(property), FourCC2Str(status));
516 return NS_ERROR_FAILURE;
517 }
518 data = MakeUnique<uint8_t[]>(propertySize);
519 layout = reinterpret_cast<AudioChannelLayout*>(data.get());
520 size = propertySize;
521
522 if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
523 status = AudioFormatGetProperty(property,
524 sizeof(UInt32), &layout->mChannelBitmap,
525 &size, layout);
526 } else {
527 status = AudioFormatGetProperty(property,
528 sizeof(AudioChannelLayoutTag), &tag,
529 &size, layout);
530 }
531 if (status || size != propertySize) {
532 LOG("Couldn't get channel layout property (%s:%s)",
533 FourCC2Str(property), FourCC2Str(status));
534 return NS_ERROR_FAILURE;
535 }
536 // We have retrieved the channel layout from the tag or bitmap.
537 // We can now directly use the channel descriptions.
538 layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
539 }
540
541 if (layout->mNumberChannelDescriptions > MAX_AUDIO_CHANNELS ||
542 layout->mNumberChannelDescriptions != mOutputFormat.mChannelsPerFrame) {
543 LOG("Nonsensical channel layout or not matching the original channel number");
544 return NS_ERROR_FAILURE;
545 }
546
547 AudioConfig::Channel channels[MAX_AUDIO_CHANNELS];
548 for (uint32_t i = 0; i < layout->mNumberChannelDescriptions; i++) {
549 AudioChannelLabel id = layout->mChannelDescriptions[i].mChannelLabel;
550 AudioConfig::Channel channel = ConvertChannelLabel(id);
551 channels[i] = channel;
552 }
553 mChannelLayout =
554 MakeUnique<AudioConfig::ChannelLayout>(mOutputFormat.mChannelsPerFrame,
555 channels);
556 return NS_OK;
557 }
558
// Create the AudioConverter for this stream. Returns
// NS_ERROR_NOT_INITIALIZED while still probing frames for an implicit AAC
// magic cookie (caller queues the sample and retries).
MediaResult
AppleATDecoder::SetupDecoder(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  // Give implicit-SBR probing at most this many frames before giving up.
  static const uint32_t MAX_FRAMES = 2;

  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.mExtendedProfile == 2 &&
      mParsedFramesForAACMagicCookie < MAX_FRAMES) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      mParsedFramesForAACMagicCookie++;
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred, fallback to using default stream description
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  // Prefer the magic cookie extracted from the stream (implicit SBR case);
  // otherwise fall back to the container-provided extradata.
  MediaResult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                               mMagicCookie : *mConfig.mExtraData);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#elif defined(MOZ_SAMPLE_TYPE_S16)
  mOutputFormat.mBitsPerChannel = 16;
  mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
        = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return MediaResult(
      NS_ERROR_FAILURE,
      RESULT_DETAIL("Error constructing AudioConverter:%lld", int64_t(status)));
  }

  // Non-fatal: playback proceeds with the default channel layout.
  if (NS_FAILED(SetupChannelLayout())) {
    NS_WARNING("Couldn't retrieve channel layout, will use default layout");
  }

  return NS_OK;
}

628 static void
_MetadataCallback(void * aAppleATDecoder,AudioFileStreamID aStream,AudioFileStreamPropertyID aProperty,UInt32 * aFlags)629 _MetadataCallback(void* aAppleATDecoder,
630 AudioFileStreamID aStream,
631 AudioFileStreamPropertyID aProperty,
632 UInt32* aFlags)
633 {
634 AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
635 LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
636 if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
637 UInt32 size;
638 Boolean writeable;
639 OSStatus rv = AudioFileStreamGetPropertyInfo(aStream,
640 aProperty,
641 &size,
642 &writeable);
643 if (rv) {
644 LOG("Couldn't get property info for '%s' (%s)",
645 FourCC2Str(aProperty), FourCC2Str(rv));
646 decoder->mFileStreamError = true;
647 return;
648 }
649 auto data = MakeUnique<uint8_t[]>(size);
650 rv = AudioFileStreamGetProperty(aStream, aProperty,
651 &size, data.get());
652 if (rv) {
653 LOG("Couldn't get property '%s' (%s)",
654 FourCC2Str(aProperty), FourCC2Str(rv));
655 decoder->mFileStreamError = true;
656 return;
657 }
658 decoder->mMagicCookie.AppendElements(data.get(), size);
659 }
660 }
661
// AudioFileStream packet callback. We only run the stream parser to pull
// the magic cookie out via _MetadataCallback, so decoded-packet
// notifications are deliberately ignored.
static void
_SampleCallback(void* aSBR,
                UInt32 aNumBytes,
                UInt32 aNumPackets,
                const void* aData,
                AudioStreamPacketDescription* aPackets)
{
}

671 nsresult
GetImplicitAACMagicCookie(const MediaRawData * aSample)672 AppleATDecoder::GetImplicitAACMagicCookie(const MediaRawData* aSample)
673 {
674 MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
675
676 // Prepend ADTS header to AAC audio.
677 RefPtr<MediaRawData> adtssample(aSample->Clone());
678 if (!adtssample) {
679 return NS_ERROR_OUT_OF_MEMORY;
680 }
681 int8_t frequency_index =
682 mp4_demuxer::Adts::GetFrequencyIndex(mConfig.mRate);
683
684 bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.mChannels,
685 frequency_index,
686 mConfig.mProfile,
687 adtssample);
688 if (!rv) {
689 NS_WARNING("Failed to apply ADTS header");
690 return NS_ERROR_FAILURE;
691 }
692 if (!mStream) {
693 OSStatus rv = AudioFileStreamOpen(this,
694 _MetadataCallback,
695 _SampleCallback,
696 kAudioFileAAC_ADTSType,
697 &mStream);
698 if (rv) {
699 NS_WARNING("Couldn't open AudioFileStream");
700 return NS_ERROR_FAILURE;
701 }
702 }
703
704 OSStatus status = AudioFileStreamParseBytes(mStream,
705 adtssample->Size(),
706 adtssample->Data(),
707 0 /* discontinuity */);
708 if (status) {
709 NS_WARNING("Couldn't parse sample");
710 }
711
712 if (status || mFileStreamError || mMagicCookie.Length()) {
713 // We have decoded a magic cookie or an error occurred as such
714 // we won't need the stream any longer.
715 AudioFileStreamClose(mStream);
716 mStream = nullptr;
717 }
718
719 return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
720 }

} // namespace mozilla