/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <CoreFoundation/CFString.h>

#include "AppleVTDecoder.h"
#include "AppleCMLinker.h"
#include "AppleDecoderModule.h"
#include "AppleUtils.h"
#include "AppleVTLinker.h"
#include "MediaData.h"
#include "mozilla/ArrayUtils.h"
#include "H264.h"
#include "nsAutoPtr.h"
#include "nsThreadUtils.h"
#include "mozilla/Logging.h"
#include "VideoUtils.h"
#include "gfxPlatform.h"

#define LOG(...) DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)
#define LOGEX(_this, ...) \
  DDMOZ_LOGEX(_this, sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)

namespace mozilla {

AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig, TaskQueue* aTaskQueue,
                               layers::ImageContainer* aImageContainer)
    : mExtraData(aConfig.mExtraData),
      mPictureWidth(aConfig.mImage.width),
      mPictureHeight(aConfig.mImage.height),
      mDisplayWidth(aConfig.mDisplay.width),
      mDisplayHeight(aConfig.mDisplay.height),
      mTaskQueue(aTaskQueue),
      mMaxRefFrames(H264::ComputeMaxRefFrames(aConfig.mExtraData)),
      mImageContainer(aImageContainer)
#ifdef MOZ_WIDGET_UIKIT
      ,
      mUseSoftwareImages(true)
#else
      ,
      mUseSoftwareImages(false)
#endif
      ,
      mIsFlushing(false),
      mMonitor("AppleVTDecoder"),
      mFormat(nullptr),
      mSession(nullptr),
      mIsHardwareAccelerated(false) {
  MOZ_COUNT_CTOR(AppleVTDecoder);
  // TODO: Verify aConfig.mime_type.
  LOG("Creating AppleVTDecoder for %dx%d h.264 video", mDisplayWidth,
      mDisplayHeight);

  // Ensure our PromiseHolder is only ever accessed with the monitor held.
  mPromise.SetMonitor(&mMonitor);
}

AppleVTDecoder::~AppleVTDecoder() { MOZ_COUNT_DTOR(AppleVTDecoder); }

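// Initialization runs synchronously on the caller's thread: the format
// description and decompression session are created up front, so any failure
// is surfaced through the rejected InitPromise before decoding begins.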
RefPtr<MediaDataDecoder::InitPromise> AppleVTDecoder::Init() {
  MediaResult rv = InitializeSession();

  if (NS_SUCCEEDED(rv)) {
    return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
  }

  return InitPromise::CreateAndReject(rv, __func__);
}

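// Decode dispatches to the task queue; the returned promise is settled later,
// either from OutputFrame() when VideoToolbox delivers frames or from
// ProcessDecode() on error.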
RefPtr<MediaDataDecoder::DecodePromise> AppleVTDecoder::Decode(
    MediaRawData* aSample) {
  LOG("mp4 input sample %p pts %lld duration %lld us%s %zu bytes", aSample,
      aSample->mTime.ToMicroseconds(), aSample->mDuration.ToMicroseconds(),
      aSample->mKeyframe ? " keyframe" : "", aSample->Size());

  RefPtr<AppleVTDecoder> self = this;
  RefPtr<MediaRawData> sample = aSample;
  return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
    RefPtr<DecodePromise> p;
    {
      MonitorAutoLock mon(mMonitor);
      p = mPromise.Ensure(__func__);
    }
    ProcessDecode(sample);
    return p;
  });
}

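// mIsFlushing is set here, on the calling thread, before the flush task is
// queued, so that any ProcessDecode() task already pending rejects promptly.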
RefPtr<MediaDataDecoder::FlushPromise> AppleVTDecoder::Flush() {
  mIsFlushing = true;
  return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessFlush);
}

RefPtr<MediaDataDecoder::DecodePromise> AppleVTDecoder::Drain() {
  return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessDrain);
}

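// If no task queue is available, the session is torn down synchronously on
// the calling thread; otherwise teardown runs as a task like everything else.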
RefPtr<ShutdownPromise> AppleVTDecoder::Shutdown() {
  if (mTaskQueue) {
    RefPtr<AppleVTDecoder> self = this;
    return InvokeAsync(mTaskQueue, __func__, [self]() {
      self->ProcessShutdown();
      return ShutdownPromise::CreateAndResolve(true, __func__);
    });
  }
  ProcessShutdown();
  return ShutdownPromise::CreateAndResolve(true, __func__);
}

// Helper to fill in a timestamp structure.
static CMSampleTimingInfo TimingInfoFromSample(MediaRawData* aSample) {
  CMSampleTimingInfo timestamp;

  timestamp.duration =
      CMTimeMake(aSample->mDuration.ToMicroseconds(), USECS_PER_S);
  timestamp.presentationTimeStamp =
      CMTimeMake(aSample->mTime.ToMicroseconds(), USECS_PER_S);
  timestamp.decodeTimeStamp =
      CMTimeMake(aSample->mTimecode.ToMicroseconds(), USECS_PER_S);

  return timestamp;
}

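// Wraps the compressed sample in a CMBlockBuffer/CMSampleBuffer pair and
// submits it to the decompression session for asynchronous decoding; decoded
// output arrives in PlatformCallback() below.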
void AppleVTDecoder::ProcessDecode(MediaRawData* aSample) {
  AssertOnTaskQueueThread();

  if (mIsFlushing) {
    MonitorAutoLock mon(mMonitor);
    mPromise.Reject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
    return;
  }

  AutoCFRelease<CMBlockBufferRef> block = nullptr;
  AutoCFRelease<CMSampleBufferRef> sample = nullptr;
  VTDecodeInfoFlags infoFlags;
  OSStatus rv;

  // FIXME: This copies the sample data. I think we can provide
  // a custom block source which reuses the aSample buffer.
  // But note that there may be a problem keeping the samples
  // alive over multiple frames.
  rv = CMBlockBufferCreateWithMemoryBlock(
      kCFAllocatorDefault,  // Struct allocator.
      const_cast<uint8_t*>(aSample->Data()), aSample->Size(),
      kCFAllocatorNull,  // Block allocator.
      NULL,              // Block source.
      0,                 // Data offset.
      aSample->Size(), false, block.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMBlockBuffer");
    MonitorAutoLock mon(mMonitor);
    mPromise.Reject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY,
                    RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)),
        __func__);
    return;
  }

  CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
  rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1,
                            1, &timestamp, 0, NULL, sample.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMSampleBuffer");
    MonitorAutoLock mon(mMonitor);
    mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                RESULT_DETAIL("CMSampleBufferCreate:%x", rv)),
                    __func__);
    return;
  }

  VTDecodeFrameFlags decodeFlags =
      kVTDecodeFrame_EnableAsynchronousDecompression;
  rv = VTDecompressionSessionDecodeFrame(
      mSession, sample, decodeFlags, CreateAppleFrameRef(aSample), &infoFlags);
  if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
    LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
    NS_WARNING("Couldn't pass frame to decoder");
    // It appears that even when VTDecompressionSessionDecodeFrame returns a
    // failure, the frame sometimes still gets decoded, so only reject a
    // promise that is still pending.
    MonitorAutoLock mon(mMonitor);
    mPromise.RejectIfExists(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)),
        __func__);
    return;
  }
}

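// Tears down the VideoToolbox session and format description; both members
// are nulled after release, so calling this more than once is harmless.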
void AppleVTDecoder::ProcessShutdown() {
  if (mSession) {
    LOG("%s: cleaning up session %p", __func__, mSession);
    VTDecompressionSessionInvalidate(mSession);
    CFRelease(mSession);
    mSession = nullptr;
  }
  if (mFormat) {
    LOG("%s: releasing format %p", __func__, mFormat);
    CFRelease(mFormat);
    mFormat = nullptr;
  }
}

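// Drains the platform decoder, rejects any pending decode promise and drops
// everything still sitting in the reorder queue.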
RefPtr<MediaDataDecoder::FlushPromise> AppleVTDecoder::ProcessFlush() {
  AssertOnTaskQueueThread();
  nsresult rv = WaitForAsynchronousFrames();
  if (NS_FAILED(rv)) {
    LOG("AppleVTDecoder::Flush failed waiting for platform decoder");
  }
  MonitorAutoLock mon(mMonitor);
  mPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);

  while (!mReorderQueue.IsEmpty()) {
    mReorderQueue.Pop();
  }
  mSeekTargetThreshold.reset();
  mIsFlushing = false;
  return FlushPromise::CreateAndResolve(true, __func__);
}

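// Waits for all in-flight frames, then resolves with whatever remains in the
// reorder queue, in composition order.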
RefPtr<MediaDataDecoder::DecodePromise> AppleVTDecoder::ProcessDrain() {
  AssertOnTaskQueueThread();
  nsresult rv = WaitForAsynchronousFrames();
  if (NS_FAILED(rv)) {
    LOG("AppleVTDecoder::Drain failed waiting for platform decoder");
  }
  MonitorAutoLock mon(mMonitor);
  DecodedData samples;
  while (!mReorderQueue.IsEmpty()) {
    samples.AppendElement(Move(mReorderQueue.Pop()));
  }
  return DecodePromise::CreateAndResolve(Move(samples), __func__);
}

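// The returned AppleFrameRef is handed to VTDecompressionSessionDecodeFrame
// as the frame refcon and reclaimed in PlatformCallback() via nsAutoPtr.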
AppleVTDecoder::AppleFrameRef* AppleVTDecoder::CreateAppleFrameRef(
    const MediaRawData* aSample) {
  MOZ_ASSERT(aSample);
  return new AppleFrameRef(*aSample);
}

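// Frames whose composition end time falls before this threshold are returned
// as NullData rather than fully copied images (see OutputFrame()).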
void AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime) {
  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
  mSeekTargetThreshold = Some(aTime);
}

//
// Implementation details.
//

// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void PlatformCallback(void* decompressionOutputRefCon,
                             void* sourceFrameRefCon, OSStatus status,
                             VTDecodeInfoFlags flags, CVImageBufferRef image,
                             CMTime presentationTimeStamp,
                             CMTime presentationDuration) {
  AppleVTDecoder* decoder =
      static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
  LOGEX(decoder, "AppleVideoDecoder %s status %d flags %d", __func__,
        static_cast<int>(status), flags);

  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
      static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));

  // Validate our arguments.
  if (status != noErr || !image) {
    NS_WARNING("VideoToolbox decoder returned no data");
    image = nullptr;
  } else if (flags & kVTDecodeInfo_FrameDropped) {
    NS_WARNING("  ...frame tagged as dropped...");
  } else {
    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
               "VideoToolbox returned an unexpected image type");
  }

  decoder->OutputFrame(image, *frameRef);
}

// Copy and return a decoded frame.
void AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                                 AppleVTDecoder::AppleFrameRef aFrameRef) {
  if (mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset, aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : "");

  if (!aImage) {
    // The image was dropped by the decoder, or none has been returned yet.
    // We need more input to continue.
    MonitorAutoLock mon(mMonitor);
    mPromise.Resolve(DecodedData(), __func__);
    return;
  }

  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) <
        mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = gfx::IntSize(mDisplayWidth, mDisplayHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset, aFrameRef.composition_timestamp,
                        aFrameRef.duration);
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 3, "Expected a three-plane YUV420 buffer.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv =
        CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      MonitorAutoLock mon(mMonitor);
      mPromise.Reject(
          MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                      RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)),
          __func__);
      return;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
        static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
        static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width + 1) / 2;
    buffer.mPlanes[1].mHeight = (height + 1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 0;
    // Cr plane.
    buffer.mPlanes[2].mData =
        static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 2));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 2);
    buffer.mPlanes[2].mWidth = (width + 1) / 2;
    buffer.mPlanes[2].mHeight = (height + 1) / 2;
    buffer.mPlanes[2].mOffset = 0;
    buffer.mPlanes[2].mSkip = 0;

    gfx::IntRect visible = gfx::IntRect(0, 0, mPictureWidth, mPictureHeight);

    // Copy the image data into our own format.
    data = VideoData::CreateAndCopyData(
        info, mImageContainer, aFrameRef.byte_offset,
        aFrameRef.composition_timestamp, aFrameRef.duration, buffer,
        aFrameRef.is_sync_point, aFrameRef.decode_timestamp, visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data = VideoData::CreateFromImage(
        info.mDisplay, aFrameRef.byte_offset, aFrameRef.composition_timestamp,
        aFrameRef.duration, image.forget(), aFrameRef.is_sync_point,
        aFrameRef.decode_timestamp);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    MonitorAutoLock mon(mMonitor);
    mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    return;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  DecodedData results;
  while (mReorderQueue.Length() > mMaxRefFrames) {
    results.AppendElement(mReorderQueue.Pop());
  }
  mPromise.Resolve(Move(results), __func__);

  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));
}

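// Blocks until the decompression session has emitted every frame submitted
// with the asynchronous decompression flag.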
nsresult AppleVTDecoder::WaitForAsynchronousFrames() {
  OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
  if (rv != noErr) {
    NS_ERROR("AppleVTDecoder: Error waiting for asynchronous frames");
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

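// Creates the H.264 format description from the picture dimensions and avcC
// extradata, then the decompression session with our decoder specification,
// output configuration and output callback.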
MediaResult AppleVTDecoder::InitializeSession() {
  OSStatus rv;

  AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();

  rv = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                      kCMVideoCodecType_H264, mPictureWidth,
                                      mPictureHeight, extensions, &mFormat);
  if (rv != noErr) {
    return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                       RESULT_DETAIL("Couldn't create format description!"));
  }

  // Construct video decoder selection spec.
  AutoCFRelease<CFDictionaryRef> spec = CreateDecoderSpecification();

  // Construct output configuration.
  AutoCFRelease<CFDictionaryRef> outputConfiguration =
      CreateOutputConfiguration();

  VTDecompressionOutputCallbackRecord cb = {PlatformCallback, this};
  rv =
      VTDecompressionSessionCreate(kCFAllocatorDefault, mFormat,
                                   spec,  // Video decoder selection.
                                   outputConfiguration,  // Output video format.
                                   &cb, &mSession);

  if (rv != noErr) {
    return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                       RESULT_DETAIL("Couldn't create decompression session!"));
  }

  if (AppleVTLinker::skPropUsingHWAccel) {
    CFBooleanRef isUsingHW = nullptr;
    rv = VTSessionCopyProperty(mSession, AppleVTLinker::skPropUsingHWAccel,
                               kCFAllocatorDefault, &isUsingHW);
    if (rv != noErr) {
      LOG("AppleVTDecoder: system doesn't support hardware acceleration");
    }
    mIsHardwareAccelerated = rv == noErr && isUsingHW == kCFBooleanTrue;
    LOG("AppleVTDecoder: %s hardware accelerated decoding",
        mIsHardwareAccelerated ? "using" : "not using");
  } else {
    LOG("AppleVTDecoder: couldn't determine hardware acceleration status.");
  }
  return NS_OK;
}

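// Packages the avcC extradata under an "avcC" atom key, together with chroma
// location hints, as the sample description extensions for the format.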
CFDictionaryRef AppleVTDecoder::CreateDecoderExtensions() {
  AutoCFRelease<CFDataRef> avc_data = CFDataCreate(
      kCFAllocatorDefault, mExtraData->Elements(), mExtraData->Length());

  const void* atomsKey[] = {CFSTR("avcC")};
  const void* atomsValue[] = {avc_data};
  static_assert(ArrayLength(atomsKey) == ArrayLength(atomsValue),
                "Non matching keys/values array size");

  AutoCFRelease<CFDictionaryRef> atoms = CFDictionaryCreate(
      kCFAllocatorDefault, atomsKey, atomsValue, ArrayLength(atomsKey),
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

  const void* extensionKeys[] = {kCVImageBufferChromaLocationBottomFieldKey,
                                 kCVImageBufferChromaLocationTopFieldKey,
                                 AppleCMLinker::skPropExtensionAtoms};

  const void* extensionValues[] = {kCVImageBufferChromaLocation_Left,
                                   kCVImageBufferChromaLocation_Left, atoms};
  static_assert(ArrayLength(extensionKeys) == ArrayLength(extensionValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(kCFAllocatorDefault, extensionKeys, extensionValues,
                            ArrayLength(extensionKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
}

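// Builds the decoder selection spec: hardware decoding is requested unless
// this GPU is excluded by AppleDecoderModule, and no spec is returned at all
// when the VideoToolbox property isn't available.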
CFDictionaryRef AppleVTDecoder::CreateDecoderSpecification() {
  if (!AppleVTLinker::skPropEnableHWAccel) {
    return nullptr;
  }

  const void* specKeys[] = {AppleVTLinker::skPropEnableHWAccel};
  const void* specValues[1];
  if (AppleDecoderModule::sCanUseHardwareVideoDecoder) {
    specValues[0] = kCFBooleanTrue;
  } else {
    // This GPU is blacklisted for hardware decoding.
    specValues[0] = kCFBooleanFalse;
  }
  static_assert(ArrayLength(specKeys) == ArrayLength(specValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(
      kCFAllocatorDefault, specKeys, specValues, ArrayLength(specKeys),
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
}

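// Software path: request planar 4:2:0 output we can copy into a YCbCrBuffer.
// Hardware path: request packed 4:2:2 ('2vuy') in IOSurface-backed,
// OpenGL-compatible buffers that can be wrapped in a MacIOSurfaceImage.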
CFDictionaryRef AppleVTDecoder::CreateOutputConfiguration() {
  if (mUseSoftwareImages) {
    // Output format type:
    SInt32 PixelFormatTypeValue = kCVPixelFormatType_420YpCbCr8Planar;
    AutoCFRelease<CFNumberRef> PixelFormatTypeNumber = CFNumberCreate(
        kCFAllocatorDefault, kCFNumberSInt32Type, &PixelFormatTypeValue);
    const void* outputKeys[] = {kCVPixelBufferPixelFormatTypeKey};
    const void* outputValues[] = {PixelFormatTypeNumber};
    static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                  "Non matching keys/values array size");

    return CFDictionaryCreate(
        kCFAllocatorDefault, outputKeys, outputValues, ArrayLength(outputKeys),
        &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
  }

#ifndef MOZ_WIDGET_UIKIT
  // Output format type:
  SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
  AutoCFRelease<CFNumberRef> PixelFormatTypeNumber = CFNumberCreate(
      kCFAllocatorDefault, kCFNumberSInt32Type, &PixelFormatTypeValue);
  // Construct IOSurface Properties
  const void* IOSurfaceKeys[] = {MacIOSurfaceLib::kPropIsGlobal};
  const void* IOSurfaceValues[] = {kCFBooleanTrue};
  static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
                "Non matching keys/values array size");
  // Construct output configuration.
  AutoCFRelease<CFDictionaryRef> IOSurfaceProperties = CFDictionaryCreate(
      kCFAllocatorDefault, IOSurfaceKeys, IOSurfaceValues,
      ArrayLength(IOSurfaceKeys), &kCFTypeDictionaryKeyCallBacks,
      &kCFTypeDictionaryValueCallBacks);

  const void* outputKeys[] = {kCVPixelBufferIOSurfacePropertiesKey,
                              kCVPixelBufferPixelFormatTypeKey,
                              kCVPixelBufferOpenGLCompatibilityKey};
  const void* outputValues[] = {IOSurfaceProperties, PixelFormatTypeNumber,
                                kCFBooleanTrue};
  static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(
      kCFAllocatorDefault, outputKeys, outputValues, ArrayLength(outputKeys),
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
#else
  MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}

}  // namespace mozilla

#undef LOG
#undef LOGEX