/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 3 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL3 included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 3 requirements
** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 2.0 or (at your option) the GNU General
** Public license version 3 or any later version approved by the KDE Free
** Qt Foundation. The licenses are as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-2.0.html and
** https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/

#include "mftvideo.h"
#include "mfvideoprobecontrol.h"
#include <private/qmemoryvideobuffer_p.h>
#include <mferror.h>
#include <strmif.h>
#include <uuids.h>
#include <InitGuid.h>
#include <d3d9.h>
#include <qdebug.h>

// This MFT sends every sample it processes to the connected video probes.
// Samples are sent to the probes in ProcessInput.
// In ProcessOutput this MFT simply returns the original sample.

// The implementation is based on boilerplate from the MF SDK examples.
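
// A minimal usage sketch, assuming the surrounding Media Foundation session/topology
// setup that is not part of this file; 'videoSink' and 'probeControl' are placeholder
// names, only the MFTransform methods shown here are real:
//
//     MFTransform *transform = new MFTransform;   // created with one COM reference
//     transform->setVideoSink(videoSink);          // report the sink's supported input types
//     transform->addProbe(probeControl);           // probe receives a QVideoFrame per sample
//     // ... insert the transform as a topology node between the decoder and the sink ...
//     transform->removeProbe(probeControl);
//     transform->Release();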

MFTransform::MFTransform():
    m_cRef(1),
    m_inputType(0),
    m_outputType(0),
    m_sample(0),
    m_videoSinkTypeHandler(0),
    m_bytesPerLine(0)
{
}

MFTransform::~MFTransform()
{
    if (m_inputType)
        m_inputType->Release();

    if (m_outputType)
        m_outputType->Release();

    if (m_videoSinkTypeHandler)
        m_videoSinkTypeHandler->Release();
}

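// Registers a probe that will be notified (through bufferProbed()) of every video frame
// this transform processes; adding the same probe twice has no effect. removeProbe()
// detaches it again.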
void MFTransform::addProbe(MFVideoProbeControl *probe)
{
    QMutexLocker locker(&m_videoProbeMutex);

    if (m_videoProbes.contains(probe))
        return;

    m_videoProbes.append(probe);
}

void MFTransform::removeProbe(MFVideoProbeControl *probe)
{
    QMutexLocker locker(&m_videoProbeMutex);
    m_videoProbes.removeOne(probe);
}

void MFTransform::setVideoSink(IUnknown *videoSink)
{
    // This transform supports the same input types as the video sink.
    // Store its type handler interface in order to report the correct supported types.

    if (m_videoSinkTypeHandler) {
        m_videoSinkTypeHandler->Release();
        m_videoSinkTypeHandler = NULL;
    }

    if (videoSink)
        videoSink->QueryInterface(IID_PPV_ARGS(&m_videoSinkTypeHandler));
}

STDMETHODIMP MFTransform::QueryInterface(REFIID riid, void** ppv)
{
    if (!ppv)
        return E_POINTER;
    if (riid == IID_IMFTransform) {
        *ppv = static_cast<IMFTransform*>(this);
    } else if (riid == IID_IUnknown) {
        *ppv = static_cast<IUnknown*>(this);
    } else {
        *ppv = NULL;
        return E_NOINTERFACE;
    }
    AddRef();
    return S_OK;
}

STDMETHODIMP_(ULONG) MFTransform::AddRef()
{
    return InterlockedIncrement(&m_cRef);
}

STDMETHODIMP_(ULONG) MFTransform::Release()
{
    ULONG cRef = InterlockedDecrement(&m_cRef);
    if (cRef == 0) {
        delete this;
    }
    return cRef;
}

STDMETHODIMP MFTransform::GetStreamLimits(DWORD *pdwInputMinimum, DWORD *pdwInputMaximum, DWORD *pdwOutputMinimum, DWORD *pdwOutputMaximum)
{
    if (!pdwInputMinimum || !pdwInputMaximum || !pdwOutputMinimum || !pdwOutputMaximum)
        return E_POINTER;
    *pdwInputMinimum = 1;
    *pdwInputMaximum = 1;
    *pdwOutputMinimum = 1;
    *pdwOutputMaximum = 1;
    return S_OK;
}

STDMETHODIMP MFTransform::GetStreamCount(DWORD *pcInputStreams, DWORD *pcOutputStreams)
{
    if (!pcInputStreams || !pcOutputStreams)
        return E_POINTER;

    *pcInputStreams = 1;
    *pcOutputStreams = 1;
    return S_OK;
}

STDMETHODIMP MFTransform::GetStreamIDs(DWORD dwInputIDArraySize, DWORD *pdwInputIDs, DWORD dwOutputIDArraySize, DWORD *pdwOutputIDs)
{
    // streams are numbered consecutively
    Q_UNUSED(dwInputIDArraySize);
    Q_UNUSED(pdwInputIDs);
    Q_UNUSED(dwOutputIDArraySize);
    Q_UNUSED(pdwOutputIDs);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::GetInputStreamInfo(DWORD dwInputStreamID, MFT_INPUT_STREAM_INFO *pStreamInfo)
{
    QMutexLocker locker(&m_mutex);

    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (!pStreamInfo)
        return E_POINTER;

    pStreamInfo->cbSize = 0;
    pStreamInfo->hnsMaxLatency = 0;
    pStreamInfo->cbMaxLookahead = 0;
    pStreamInfo->cbAlignment = 0;
    pStreamInfo->dwFlags = MFT_INPUT_STREAM_WHOLE_SAMPLES
                            | MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER
                            | MFT_INPUT_STREAM_PROCESSES_IN_PLACE;

    return S_OK;
}

STDMETHODIMP MFTransform::GetOutputStreamInfo(DWORD dwOutputStreamID, MFT_OUTPUT_STREAM_INFO *pStreamInfo)
{
    QMutexLocker locker(&m_mutex);

    if (dwOutputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (!pStreamInfo)
        return E_POINTER;

    pStreamInfo->cbSize = 0;
    pStreamInfo->cbAlignment = 0;
    pStreamInfo->dwFlags = MFT_OUTPUT_STREAM_WHOLE_SAMPLES
                            | MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER
                            | MFT_OUTPUT_STREAM_PROVIDES_SAMPLES
                            | MFT_OUTPUT_STREAM_DISCARDABLE;

    return S_OK;
}

STDMETHODIMP MFTransform::GetAttributes(IMFAttributes **pAttributes)
{
    // This MFT does not support attributes.
    Q_UNUSED(pAttributes);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::GetInputStreamAttributes(DWORD dwInputStreamID, IMFAttributes **pAttributes)
{
    // This MFT does not support input stream attributes.
    Q_UNUSED(dwInputStreamID);
    Q_UNUSED(pAttributes);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::GetOutputStreamAttributes(DWORD dwOutputStreamID, IMFAttributes **pAttributes)
{
    // This MFT does not support output stream attributes.
    Q_UNUSED(dwOutputStreamID);
    Q_UNUSED(pAttributes);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::DeleteInputStream(DWORD dwStreamID)
{
    // This MFT has a fixed number of input streams.
    Q_UNUSED(dwStreamID);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::AddInputStreams(DWORD cStreams, DWORD *adwStreamIDs)
{
    // This MFT has a fixed number of input streams.
    Q_UNUSED(cStreams);
    Q_UNUSED(adwStreamIDs);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::GetInputAvailableType(DWORD dwInputStreamID, DWORD dwTypeIndex, IMFMediaType **ppType)
{
    // We support the same input types as the video sink
    if (!m_videoSinkTypeHandler)
        return E_NOTIMPL;

    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (!ppType)
        return E_POINTER;

    return m_videoSinkTypeHandler->GetMediaTypeByIndex(dwTypeIndex, ppType);
}

STDMETHODIMP MFTransform::GetOutputAvailableType(DWORD dwOutputStreamID, DWORD dwTypeIndex, IMFMediaType **ppType)
{
    // Since we don't modify the samples, the output type must be the same as the input type.
    // Report our input type as the only available output type.

    if (dwOutputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (!ppType)
        return E_POINTER;

    // Input type must be set first
    if (!m_inputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (dwTypeIndex > 0)
        return MF_E_NO_MORE_TYPES;

    // Return a copy to make sure our type is not modified
    if (FAILED(MFCreateMediaType(ppType)))
        return E_OUTOFMEMORY;

    return m_inputType->CopyAllItems(*ppType);
}

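// Sets (or, with MFT_SET_TYPE_TEST_ONLY, merely validates) the input media type.
// Changing the type is refused while a sample is pending, and a previously negotiated
// output type is dropped when it no longer matches the new input type.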
STDMETHODIMP MFTransform::SetInputType(DWORD dwInputStreamID, IMFMediaType *pType, DWORD dwFlags)
{
    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    QMutexLocker locker(&m_mutex);

    if (m_sample)
        return MF_E_TRANSFORM_CANNOT_CHANGE_MEDIATYPE_WHILE_PROCESSING;

    if (!isMediaTypeSupported(pType))
        return MF_E_INVALIDMEDIATYPE;

    if (dwFlags == MFT_SET_TYPE_TEST_ONLY)
        return pType ? S_OK : E_POINTER;

    if (m_inputType) {
        m_inputType->Release();
        // Input type has changed, discard output type (if it's set) so it's reset later on
        DWORD flags = 0;
        if (m_outputType && m_outputType->IsEqual(pType, &flags) != S_OK) {
            m_outputType->Release();
            m_outputType = 0;
        }
    }

    m_inputType = pType;

    if (m_inputType)
        m_inputType->AddRef();

    return S_OK;
}

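// Sets the output media type. Because samples are passed through unmodified, only a type
// equal to the current input type is accepted; on success the matching QVideoSurfaceFormat
// and stride are cached for makeVideoFrame().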
STDMETHODIMP MFTransform::SetOutputType(DWORD dwOutputStreamID, IMFMediaType *pType, DWORD dwFlags)
{
    if (dwOutputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (dwFlags == MFT_SET_TYPE_TEST_ONLY && !pType)
        return E_POINTER;

    QMutexLocker locker(&m_mutex);

    // Input type must be set first
    if (!m_inputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (m_sample)
        return MF_E_TRANSFORM_CANNOT_CHANGE_MEDIATYPE_WHILE_PROCESSING;

    DWORD flags = 0;
    if (pType && m_inputType->IsEqual(pType, &flags) != S_OK)
        return MF_E_INVALIDMEDIATYPE;

    if (dwFlags == MFT_SET_TYPE_TEST_ONLY)
        return pType ? S_OK : E_POINTER;

    if (m_outputType)
        m_outputType->Release();

    m_outputType = pType;

    if (m_outputType) {
        m_outputType->AddRef();
        m_format = videoFormatForMFMediaType(m_outputType, &m_bytesPerLine);
    }

    return S_OK;
}

STDMETHODIMP MFTransform::GetInputCurrentType(DWORD dwInputStreamID, IMFMediaType **ppType)
{
    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (ppType == NULL)
        return E_POINTER;

    QMutexLocker locker(&m_mutex);

    if (!m_inputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    // Return a copy to make sure our type is not modified
    if (FAILED(MFCreateMediaType(ppType)))
        return E_OUTOFMEMORY;

    return m_inputType->CopyAllItems(*ppType);
}

STDMETHODIMP MFTransform::GetOutputCurrentType(DWORD dwOutputStreamID, IMFMediaType **ppType)
{
    if (dwOutputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (ppType == NULL)
        return E_POINTER;

    QMutexLocker locker(&m_mutex);

    if (!m_outputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    // Return a copy to make sure our type is not modified
    if (FAILED(MFCreateMediaType(ppType)))
        return E_OUTOFMEMORY;

    return m_outputType->CopyAllItems(*ppType);
}

STDMETHODIMP MFTransform::GetInputStatus(DWORD dwInputStreamID, DWORD *pdwFlags)
{
    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (!pdwFlags)
        return E_POINTER;

    QMutexLocker locker(&m_mutex);

    if (!m_inputType || !m_outputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (m_sample)
        *pdwFlags = 0;
    else
        *pdwFlags = MFT_INPUT_STATUS_ACCEPT_DATA;

    return S_OK;
}

STDMETHODIMP MFTransform::GetOutputStatus(DWORD *pdwFlags)
{
    if (!pdwFlags)
        return E_POINTER;

    QMutexLocker locker(&m_mutex);

    if (!m_inputType || !m_outputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (m_sample)
        *pdwFlags = MFT_OUTPUT_STATUS_SAMPLE_READY;
    else
        *pdwFlags = 0;

    return S_OK;
}

STDMETHODIMP MFTransform::SetOutputBounds(LONGLONG hnsLowerBound, LONGLONG hnsUpperBound)
{
    Q_UNUSED(hnsLowerBound);
    Q_UNUSED(hnsUpperBound);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::ProcessEvent(DWORD dwInputStreamID, IMFMediaEvent *pEvent)
{
    // This MFT ignores all events, and the pipeline should send all events downstream.
    Q_UNUSED(dwInputStreamID);
    Q_UNUSED(pEvent);
    return E_NOTIMPL;
}

STDMETHODIMP MFTransform::ProcessMessage(MFT_MESSAGE_TYPE eMessage, ULONG_PTR ulParam)
{
    Q_UNUSED(ulParam);

    HRESULT hr = S_OK;

    switch (eMessage)
    {
    case MFT_MESSAGE_COMMAND_FLUSH:
        hr = OnFlush();
        break;

    case MFT_MESSAGE_COMMAND_DRAIN:
        // Drain: Tells the MFT not to accept any more input until
        // all of the pending output has been processed. That is our
        // default behavior already, so there is nothing to do.
        break;

    case MFT_MESSAGE_SET_D3D_MANAGER:
        // The pipeline should never send this message unless the MFT
        // has the MF_SA_D3D_AWARE attribute set to TRUE. However, if we
        // do get this message, it's invalid and we don't implement it.
        hr = E_NOTIMPL;
        break;

    // The remaining messages do not require any action from this MFT.
    case MFT_MESSAGE_NOTIFY_BEGIN_STREAMING:
    case MFT_MESSAGE_NOTIFY_END_STREAMING:
    case MFT_MESSAGE_NOTIFY_END_OF_STREAM:
    case MFT_MESSAGE_NOTIFY_START_OF_STREAM:
        break;
    }

    return hr;
}

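// Stores the incoming sample (one at a time) and immediately hands a copy of its frame
// data to the registered probes. The sample itself is later returned unmodified from
// ProcessOutput().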
STDMETHODIMP MFTransform::ProcessInput(DWORD dwInputStreamID, IMFSample *pSample, DWORD dwFlags)
{
    if (dwInputStreamID > 0)
        return MF_E_INVALIDSTREAMNUMBER;

    if (dwFlags != 0)
        return E_INVALIDARG; // dwFlags is reserved and must be zero.

    QMutexLocker locker(&m_mutex);

    if (!m_inputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (m_sample)
        return MF_E_NOTACCEPTING;

    // Validate the number of buffers. There should only be a single buffer to hold the video frame.
    DWORD dwBufferCount = 0;
    HRESULT hr = pSample->GetBufferCount(&dwBufferCount);
    if (FAILED(hr))
        return hr;

    if (dwBufferCount == 0)
        return E_FAIL;

    if (dwBufferCount > 1)
        return MF_E_SAMPLE_HAS_TOO_MANY_BUFFERS;

    m_sample = pSample;
    m_sample->AddRef();

    QMutexLocker lockerProbe(&m_videoProbeMutex);

    if (!m_videoProbes.isEmpty()) {
        QVideoFrame frame = makeVideoFrame();

        for (MFVideoProbeControl* probe : qAsConst(m_videoProbes))
            probe->bufferProbed(frame);
    }

    return S_OK;
}

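// Hands the sample stored by ProcessInput() back to the pipeline unmodified (this MFT
// provides its own output samples) and notifies the registered probes once more.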
STDMETHODIMP MFTransform::ProcessOutput(DWORD dwFlags, DWORD cOutputBufferCount, MFT_OUTPUT_DATA_BUFFER *pOutputSamples, DWORD *pdwStatus)
{
    if (pOutputSamples == NULL || pdwStatus == NULL)
        return E_POINTER;

    if (cOutputBufferCount != 1)
        return E_INVALIDARG;

    QMutexLocker locker(&m_mutex);

    if (!m_inputType)
        return MF_E_TRANSFORM_TYPE_NOT_SET;

    if (!m_outputType) {
        pOutputSamples[0].dwStatus = MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE;
        return MF_E_TRANSFORM_STREAM_CHANGE;
    }

    IMFMediaBuffer *input = NULL;
    IMFMediaBuffer *output = NULL;

    if (dwFlags == MFT_PROCESS_OUTPUT_DISCARD_WHEN_NO_BUFFER)
        goto done;
    else if (dwFlags != 0)
        return E_INVALIDARG;

    if (!m_sample)
        return MF_E_TRANSFORM_NEED_MORE_INPUT;

    // Since the MFT_OUTPUT_STREAM_PROVIDES_SAMPLES flag is set, the client
    // should not be providing samples here
    if (pOutputSamples[0].pSample != NULL)
        return E_INVALIDARG;

    pOutputSamples[0].pSample = m_sample;
    pOutputSamples[0].pSample->AddRef();

    // Send video frame to probes
    // We do it here (instead of inside ProcessInput) to make sure samples discarded by the renderer
    // are not sent.
    m_videoProbeMutex.lock();
    if (!m_videoProbes.isEmpty()) {
        QVideoFrame frame = makeVideoFrame();

        for (MFVideoProbeControl* probe : qAsConst(m_videoProbes))
            probe->bufferProbed(frame);
    }
    m_videoProbeMutex.unlock();

done:
    pOutputSamples[0].dwStatus = 0;
    *pdwStatus = 0;

    // The pending sample may be absent when output is discarded before any input arrived.
    if (m_sample) {
        m_sample->Release();
        m_sample = 0;
    }

    if (input)
        input->Release();
    if (output)
        output->Release();

    return S_OK;
}

HRESULT MFTransform::OnFlush()
{
    QMutexLocker locker(&m_mutex);

    if (m_sample) {
        m_sample->Release();
        m_sample = 0;
    }
    return S_OK;
}

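// Maps a Media Foundation video subtype GUID to the corresponding QVideoFrame pixel
// format, or Format_Invalid if the subtype is not handled.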
QVideoFrame::PixelFormat MFTransform::formatFromSubtype(const GUID& subtype)
{
    if (subtype == MFVideoFormat_ARGB32)
        return QVideoFrame::Format_ARGB32;
    else if (subtype == MFVideoFormat_RGB32)
        return QVideoFrame::Format_RGB32;
    else if (subtype == MFVideoFormat_RGB24)
        return QVideoFrame::Format_RGB24;
    else if (subtype == MFVideoFormat_RGB565)
        return QVideoFrame::Format_RGB565;
    else if (subtype == MFVideoFormat_RGB555)
        return QVideoFrame::Format_RGB555;
    else if (subtype == MFVideoFormat_AYUV)
        return QVideoFrame::Format_AYUV444;
    else if (subtype == MFVideoFormat_I420)
        return QVideoFrame::Format_YUV420P;
    else if (subtype == MFVideoFormat_UYVY)
        return QVideoFrame::Format_UYVY;
    else if (subtype == MFVideoFormat_YV12)
        return QVideoFrame::Format_YV12;
    else if (subtype == MFVideoFormat_NV12)
        return QVideoFrame::Format_NV12;

    return QVideoFrame::Format_Invalid;
}

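// Builds a QVideoSurfaceFormat (frame size, pixel format, pixel aspect ratio, frame rate)
// from an IMFMediaType and reports the default stride through *bytesPerLine. An empty
// format is returned if the stride, frame size or subtype attribute is missing.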
QVideoSurfaceFormat MFTransform::videoFormatForMFMediaType(IMFMediaType *mediaType, int *bytesPerLine)
{
    UINT32 stride;
    if (FAILED(mediaType->GetUINT32(MF_MT_DEFAULT_STRIDE, &stride))) {
        *bytesPerLine = 0;
        return QVideoSurfaceFormat();
    }

    *bytesPerLine = (int)stride;

    QSize size;
    UINT32 width, height;
    if (FAILED(MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height)))
        return QVideoSurfaceFormat();

    size.setWidth(width);
    size.setHeight(height);

    GUID subtype = GUID_NULL;
    if (FAILED(mediaType->GetGUID(MF_MT_SUBTYPE, &subtype)))
        return QVideoSurfaceFormat();

    QVideoFrame::PixelFormat pixelFormat = formatFromSubtype(subtype);
    QVideoSurfaceFormat format(size, pixelFormat);

    UINT32 num, den;
    if (SUCCEEDED(MFGetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO, &num, &den))) {
        format.setPixelAspectRatio(num, den);
    }
    if (SUCCEEDED(MFGetAttributeRatio(mediaType, MF_MT_FRAME_RATE, &num, &den))) {
        format.setFrameRate(qreal(num)/den);
    }

    return format;
}

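// Copies the current sample's data into a QVideoFrame that matches the negotiated output
// format, converting the sample's start time and duration from 100 ns units to microseconds.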
QVideoFrame MFTransform::makeVideoFrame()
{
    QVideoFrame frame;

    if (!m_format.isValid())
        return frame;

    IMFMediaBuffer *buffer = 0;

    do {
        if (FAILED(m_sample->ConvertToContiguousBuffer(&buffer)))
            break;

        QByteArray array = dataFromBuffer(buffer, m_format.frameHeight(), &m_bytesPerLine);
        if (array.isEmpty())
            break;

        // Wrapping the IMFSample or IMFMediaBuffer in a QVideoFrame is not possible because we
        // cannot hold the IMFSample for a "long" time without affecting the rest of the topology.
        // If an IMFSample is held for more than 5 frames, the decoder starts to reuse it even
        // though it hasn't been released yet. That is why we copy the data from the
        // IMFMediaBuffer here.
        frame = QVideoFrame(new QMemoryVideoBuffer(array, m_bytesPerLine), m_format.frameSize(), m_format.pixelFormat());

        // WMF uses 100-nanosecond units, Qt uses microseconds
        LONGLONG startTime = -1;
        if (SUCCEEDED(m_sample->GetSampleTime(&startTime))) {
            frame.setStartTime(startTime * 0.1);

            LONGLONG duration = -1;
            if (SUCCEEDED(m_sample->GetSampleDuration(&duration)))
                frame.setEndTime((startTime + duration) * 0.1);
        }
    } while (false);

    if (buffer)
        buffer->Release();

    return frame;
}

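// Extracts the raw frame bytes from a media buffer. A regular Lock() is attempted first;
// if the buffer is backed by a Direct3D 9 surface, the surface is locked instead and its
// pitch is reported through *bytesPerLine.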
QByteArray MFTransform::dataFromBuffer(IMFMediaBuffer *buffer, int height, int *bytesPerLine)
{
    QByteArray array;
    BYTE *bytes;
    DWORD length;
    HRESULT hr = buffer->Lock(&bytes, NULL, &length);
    if (SUCCEEDED(hr)) {
        array = QByteArray((const char *)bytes, (int)length);
        buffer->Unlock();
    } else {
        // try to lock as Direct3DSurface
        IDirect3DSurface9 *surface = 0;
        do {
            if (FAILED(MFGetService(buffer, MR_BUFFER_SERVICE, IID_IDirect3DSurface9, (void**)&surface)))
                break;

            D3DLOCKED_RECT rect;
            if (FAILED(surface->LockRect(&rect, NULL, D3DLOCK_READONLY)))
                break;

            if (bytesPerLine)
                *bytesPerLine = (int)rect.Pitch;

            array = QByteArray((const char *)rect.pBits, rect.Pitch * height);
            surface->UnlockRect();
        } while (false);

        if (surface) {
            surface->Release();
            surface = 0;
        }
    }

    return array;
}

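// Delegates the compatibility check to the video sink's type handler; without one, any
// type is assumed to be supported.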
bool MFTransform::isMediaTypeSupported(IMFMediaType *type)
{
    // If we don't have the video sink's type handler,
    // assume it supports anything...
    if (!m_videoSinkTypeHandler || !type)
        return true;

    return m_videoSinkTypeHandler->IsMediaTypeSupported(type, NULL) == S_OK;
}