/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <CoreFoundation/CFString.h>

#include "AppleCMLinker.h"
#include "AppleDecoderModule.h"
#include "AppleUtils.h"
#include "AppleVTDecoder.h"
#include "AppleVTLinker.h"
#include "MediaData.h"
#include "mozilla/ArrayUtils.h"
#include "mp4_demuxer/H264.h"
#include "nsAutoPtr.h"
#include "nsThreadUtils.h"
#include "mozilla/Logging.h"
#include "VideoUtils.h"
#include "gfxPlatform.h"

#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))

namespace mozilla {

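// AppleVTDecoder drives an asynchronous VideoToolbox decompression session.
// Samples are decoded on mTaskQueue; VideoToolbox hands decoded images back
// on one of its own threads via PlatformCallback, and OutputFrame reorders
// them from decode order into composition order before returning them
// through mCallback.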
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
                               TaskQueue* aTaskQueue,
                               MediaDataDecoderCallback* aCallback,
                               layers::ImageContainer* aImageContainer)
  : mExtraData(aConfig.mExtraData)
  , mCallback(aCallback)
  , mPictureWidth(aConfig.mImage.width)
  , mPictureHeight(aConfig.mImage.height)
  , mDisplayWidth(aConfig.mDisplay.width)
  , mDisplayHeight(aConfig.mDisplay.height)
  , mTaskQueue(aTaskQueue)
  , mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
  , mImageContainer(aImageContainer)
  , mIsShutDown(false)
#ifdef MOZ_WIDGET_UIKIT
  , mUseSoftwareImages(true)
#else
  , mUseSoftwareImages(false)
#endif
  , mIsFlushing(false)
  , mMonitor("AppleVideoDecoder")
  , mFormat(nullptr)
  , mSession(nullptr)
  , mIsHardwareAccelerated(false)
{
  MOZ_COUNT_CTOR(AppleVTDecoder);
  // TODO: Verify aConfig.mime_type.
  LOG("Creating AppleVTDecoder for %dx%d h.264 video",
      mDisplayWidth,
      mDisplayHeight
      );
}

AppleVTDecoder::~AppleVTDecoder()
{
  MOZ_COUNT_DTOR(AppleVTDecoder);
}

RefPtr<MediaDataDecoder::InitPromise>
AppleVTDecoder::Init()
{
  nsresult rv = InitializeSession();

  if (NS_SUCCEEDED(rv)) {
    return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
  }

  return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}

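// Input() is called on the reader's task queue; decoding is bounced over to
// mTaskQueue so the reader is never blocked on the platform decoder.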
void
AppleVTDecoder::Input(MediaRawData* aSample)
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());

  LOG("mp4 input sample %p pts %lld duration %lld us%s %zu bytes",
      aSample,
      aSample->mTime,
      aSample->mDuration,
      aSample->mKeyframe ? " keyframe" : "",
      aSample->Size());

  mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
    this, &AppleVTDecoder::ProcessDecode, aSample));
}

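// Flush() runs synchronously: mIsFlushing is raised first so that any
// ProcessDecode already queued on mTaskQueue bails out early, and so that
// OutputFrame drops anything the platform decoder returns while we drain.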
void
AppleVTDecoder::Flush()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  mIsFlushing = true;
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
  SyncRunnable::DispatchToThread(mTaskQueue, runnable);
  mIsFlushing = false;

  mSeekTargetThreshold.reset();
}

void
AppleVTDecoder::Drain()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleVTDecoder::ProcessDrain);
  mTaskQueue->Dispatch(runnable.forget());
}

void
AppleVTDecoder::Shutdown()
{
  MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
  mIsShutDown = true;
  if (mTaskQueue) {
    nsCOMPtr<nsIRunnable> runnable =
      NewRunnableMethod(this, &AppleVTDecoder::ProcessShutdown);
    mTaskQueue->Dispatch(runnable.forget());
  } else {
    ProcessShutdown();
  }
}

nsresult
AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
{
  AssertOnTaskQueueThread();

  if (mIsFlushing) {
    return NS_OK;
  }

  return DoDecode(aSample);
}

void
AppleVTDecoder::ProcessShutdown()
{
  if (mSession) {
    LOG("%s: cleaning up session %p", __func__, mSession);
    VTDecompressionSessionInvalidate(mSession);
    CFRelease(mSession);
    mSession = nullptr;
  }
  if (mFormat) {
    LOG("%s: releasing format %p", __func__, mFormat);
    CFRelease(mFormat);
    mFormat = nullptr;
  }
}

void
AppleVTDecoder::ProcessFlush()
{
  AssertOnTaskQueueThread();
  nsresult rv = WaitForAsynchronousFrames();
  if (NS_FAILED(rv)) {
    LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
        "with error:%d.", rv);
  }
  ClearReorderedFrames();
}

void
AppleVTDecoder::ProcessDrain()
{
  AssertOnTaskQueueThread();
  nsresult rv = WaitForAsynchronousFrames();
  if (NS_FAILED(rv)) {
    LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
        "with error:%d.", rv);
  }
  DrainReorderedFrames();
  mCallback->DrainComplete();
}

AppleVTDecoder::AppleFrameRef*
AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
{
  MOZ_ASSERT(aSample);
  return new AppleFrameRef(*aSample);
}

void
AppleVTDecoder::DrainReorderedFrames()
{
  MonitorAutoLock mon(mMonitor);
  while (!mReorderQueue.IsEmpty()) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
}

void
AppleVTDecoder::ClearReorderedFrames()
{
  MonitorAutoLock mon(mMonitor);
  while (!mReorderQueue.IsEmpty()) {
    mReorderQueue.Pop();
  }
}

void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
  mSeekTargetThreshold = Some(aTime);
}

//
// Implementation details.
//

// Callback passed to the VideoToolbox decoder for returning data.
// This needs to be static because the API takes a C-style pair of
// function and userdata pointers. This validates parameters and
// forwards the decoded image back to an object method.
static void
PlatformCallback(void* decompressionOutputRefCon,
                 void* sourceFrameRefCon,
                 OSStatus status,
                 VTDecodeInfoFlags flags,
                 CVImageBufferRef image,
                 CMTime presentationTimeStamp,
                 CMTime presentationDuration)
{
  LOG("AppleVideoDecoder %s status %d flags %d", __func__, status, flags);

  AppleVTDecoder* decoder =
    static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
    static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));

  // Validate our arguments.
  if (status != noErr || !image) {
    NS_WARNING("VideoToolbox decoder returned no data");
    image = nullptr;
  } else if (flags & kVTDecodeInfo_FrameDropped) {
    NS_WARNING(" ...frame tagged as dropped...");
  } else {
    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
               "VideoToolbox returned an unexpected image type");
  }
  decoder->OutputFrame(image, *frameRef);
}

// Copy and return a decoded frame.
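// Note: this runs on a VideoToolbox-owned thread (via PlatformCallback),
// which is why mReorderQueue is only touched while holding mMonitor.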
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            AppleVTDecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
      );

  if (!aImage) {
    // The image was dropped by the decoder, or no frame has been returned
    // yet. We need more input to continue.
    mCallback->InputExhausted();
    return NS_OK;
  }

  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) <
        mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Expected a biplanar (NV12) pixel buffer.");
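    // NV12 keeps Y in plane 0 and interleaved Cb/Cr pairs in plane 1; the
    // mOffset/mSkip values below pick the Cb and Cr bytes out of that
    // shared chroma plane.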

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv =
      CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)));
      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width + 1) / 2;
    buffer.mPlanes[1].mHeight = (height + 1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width + 1) / 2;
    buffer.mPlanes[2].mHeight = (height + 1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::CreateAndCopyData(info,
                                   mImageContainer,
                                   aFrameRef.byte_offset,
                                   aFrameRef.composition_timestamp.ToMicroseconds(),
                                   aFrameRef.duration.ToMicroseconds(),
                                   buffer,
                                   aFrameRef.is_sync_point,
                                   aFrameRef.decode_timestamp.ToMicroseconds(),
                                   visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
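  // H.264 can hold back at most mMaxRefFrames pictures for reordering, so
  // once the queue grows past that depth the oldest queued frame is
  // guaranteed to be next in composition order and can be emitted.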
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  if (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mCallback->InputExhausted();
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}

nsresult
AppleVTDecoder::WaitForAsynchronousFrames()
{
  OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
  if (rv != noErr) {
    LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

// Helper to fill in a timestamp structure.
static CMSampleTimingInfo
TimingInfoFromSample(MediaRawData* aSample)
{
  CMSampleTimingInfo timestamp;

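  // Sample times are in microseconds, so USECS_PER_S is used as the CMTime
  // timescale to represent them exactly.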
  timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
  timestamp.presentationTimeStamp =
    CMTimeMake(aSample->mTime, USECS_PER_S);
  timestamp.decodeTimeStamp =
    CMTimeMake(aSample->mTimecode, USECS_PER_S);

  return timestamp;
}

MediaResult
AppleVTDecoder::DoDecode(MediaRawData* aSample)
{
  AssertOnTaskQueueThread();

  // For some reason this gives me a double-free error with stagefright.
  AutoCFRelease<CMBlockBufferRef> block = nullptr;
  AutoCFRelease<CMSampleBufferRef> sample = nullptr;
  VTDecodeInfoFlags infoFlags;
  OSStatus rv;

  // FIXME: This copies the sample data. I think we can provide
  // a custom block source which reuses the aSample buffer.
  // But note that there may be a problem keeping the samples
  // alive over multiple frames.
  rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
                                          const_cast<uint8_t*>(aSample->Data()),
                                          aSample->Size(),
                                          kCFAllocatorNull, // Block allocator.
                                          NULL, // Block source.
                                          0,    // Data offset.
                                          aSample->Size(),
                                          false,
                                          block.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMBlockBuffer");
    mCallback->Error(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)));
    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
  }
  CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
  rv = CMSampleBufferCreate(kCFAllocatorDefault,
                            block,
                            true,
                            0,
                            0,
                            mFormat,
                            1,
                            1,
                            &timestamp,
                            0,
                            NULL,
                            sample.receive());
  if (rv != noErr) {
    NS_ERROR("Couldn't create CMSampleBuffer");
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("CMSampleBufferCreate:%x", rv)));
    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
  }

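  // Decode asynchronously: VideoToolbox delivers each decoded frame to
  // PlatformCallback on its own thread, in decode rather than presentation
  // order.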
  VTDecodeFrameFlags decodeFlags =
    kVTDecodeFrame_EnableAsynchronousDecompression;
  rv = VTDecompressionSessionDecodeFrame(mSession,
                                         sample,
                                         decodeFlags,
                                         CreateAppleFrameRef(aSample),
                                         &infoFlags);
  if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
    LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
    NS_WARNING("Couldn't pass frame to decoder");
    mCallback->Error(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)));
    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
  }

  return NS_OK;
}

nsresult
AppleVTDecoder::InitializeSession()
{
  OSStatus rv;

  AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();

  rv = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                      kCMVideoCodecType_H264,
                                      mPictureWidth,
                                      mPictureHeight,
                                      extensions,
                                      &mFormat);
  if (rv != noErr) {
    NS_ERROR("Couldn't create format description!");
    return NS_ERROR_FAILURE;
  }

  // Construct video decoder selection spec.
  AutoCFRelease<CFDictionaryRef> spec = CreateDecoderSpecification();

  // Construct output configuration.
  AutoCFRelease<CFDictionaryRef> outputConfiguration =
    CreateOutputConfiguration();

  VTDecompressionOutputCallbackRecord cb = { PlatformCallback, this };
  rv = VTDecompressionSessionCreate(kCFAllocatorDefault,
                                    mFormat,
                                    spec, // Video decoder selection.
                                    outputConfiguration, // Output video format.
                                    &cb,
                                    &mSession);

  if (rv != noErr) {
    NS_ERROR("Couldn't create decompression session!");
    return NS_ERROR_FAILURE;
  }

  if (AppleVTLinker::skPropUsingHWAccel) {
    CFBooleanRef isUsingHW = nullptr;
    rv = VTSessionCopyProperty(mSession,
                               AppleVTLinker::skPropUsingHWAccel,
                               kCFAllocatorDefault,
                               &isUsingHW);
    if (rv != noErr) {
      LOG("AppleVTDecoder: system doesn't support hardware acceleration");
    }
    mIsHardwareAccelerated = rv == noErr && isUsingHW == kCFBooleanTrue;
    LOG("AppleVTDecoder: %s hardware accelerated decoding",
        mIsHardwareAccelerated ? "using" : "not using");
  } else {
    LOG("AppleVTDecoder: couldn't determine hardware acceleration status.");
  }
  return NS_OK;
}

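// The "avcC" atom from the container carries the H.264 configuration
// (SPS/PPS) the decoder needs; it is passed through as a sample description
// extension on the format description.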
CFDictionaryRef
AppleVTDecoder::CreateDecoderExtensions()
{
  AutoCFRelease<CFDataRef> avc_data =
    CFDataCreate(kCFAllocatorDefault,
                 mExtraData->Elements(),
                 mExtraData->Length());

  const void* atomsKey[] = { CFSTR("avcC") };
  const void* atomsValue[] = { avc_data };
  static_assert(ArrayLength(atomsKey) == ArrayLength(atomsValue),
                "Non matching keys/values array size");

  AutoCFRelease<CFDictionaryRef> atoms =
    CFDictionaryCreate(kCFAllocatorDefault,
                       atomsKey,
                       atomsValue,
                       ArrayLength(atomsKey),
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);

  const void* extensionKeys[] =
    { kCVImageBufferChromaLocationBottomFieldKey,
      kCVImageBufferChromaLocationTopFieldKey,
      AppleCMLinker::skPropExtensionAtoms };

  const void* extensionValues[] =
    { kCVImageBufferChromaLocation_Left,
      kCVImageBufferChromaLocation_Left,
      atoms };
  static_assert(ArrayLength(extensionKeys) == ArrayLength(extensionValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(kCFAllocatorDefault,
                            extensionKeys,
                            extensionValues,
                            ArrayLength(extensionKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
}

CFDictionaryRef
AppleVTDecoder::CreateDecoderSpecification()
{
  if (!AppleVTLinker::skPropEnableHWAccel) {
    return nullptr;
  }

  const void* specKeys[] = { AppleVTLinker::skPropEnableHWAccel };
  const void* specValues[1];
  if (AppleDecoderModule::sCanUseHardwareVideoDecoder) {
    specValues[0] = kCFBooleanTrue;
  } else {
    // This GPU is blacklisted for hardware decoding.
    specValues[0] = kCFBooleanFalse;
  }
  static_assert(ArrayLength(specKeys) == ArrayLength(specValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(kCFAllocatorDefault,
                            specKeys,
                            specValues,
                            ArrayLength(specKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
}

CFDictionaryRef
AppleVTDecoder::CreateOutputConfiguration()
{
  if (mUseSoftwareImages) {
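    // kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange is NV12, the two-plane
    // layout that the software path in OutputFrame expects.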
    // Output format type:
    SInt32 PixelFormatTypeValue =
      kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
      CFNumberCreate(kCFAllocatorDefault,
                     kCFNumberSInt32Type,
                     &PixelFormatTypeValue);
    const void* outputKeys[] = { kCVPixelBufferPixelFormatTypeKey };
    const void* outputValues[] = { PixelFormatTypeNumber };
    static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                  "Non matching keys/values array size");

    return CFDictionaryCreate(kCFAllocatorDefault,
                              outputKeys,
                              outputValues,
                              ArrayLength(outputKeys),
                              &kCFTypeDictionaryKeyCallBacks,
                              &kCFTypeDictionaryValueCallBacks);
  }

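  // On macOS we ask for IOSurface-backed, OpenGL-compatible '2vuy' (8-bit
  // 4:2:2) buffers so decoded frames can be handed to the compositor
  // without an extra copy.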
#ifndef MOZ_WIDGET_UIKIT
  // Output format type:
  SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
  AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
    CFNumberCreate(kCFAllocatorDefault,
                   kCFNumberSInt32Type,
                   &PixelFormatTypeValue);
  // Construct IOSurface Properties
  const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
  const void* IOSurfaceValues[] = { kCFBooleanTrue };
  static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
                "Non matching keys/values array size");

  // Construct output configuration.
  AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
    CFDictionaryCreate(kCFAllocatorDefault,
                       IOSurfaceKeys,
                       IOSurfaceValues,
                       ArrayLength(IOSurfaceKeys),
                       &kCFTypeDictionaryKeyCallBacks,
                       &kCFTypeDictionaryValueCallBacks);

  const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
                               kCVPixelBufferPixelFormatTypeKey,
                               kCVPixelBufferOpenGLCompatibilityKey };
  const void* outputValues[] = { IOSurfaceProperties,
                                 PixelFormatTypeNumber,
                                 kCFBooleanTrue };
  static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
                "Non matching keys/values array size");

  return CFDictionaryCreate(kCFAllocatorDefault,
                            outputKeys,
                            outputValues,
                            ArrayLength(outputKeys),
                            &kCFTypeDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);
#else
  MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}

} // namespace mozilla